author     Martin Schwidefsky <schwidefsky@de.ibm.com>  2019-05-17 12:50:43 +0200
committer  Heiko Carstens <heiko.carstens@de.ibm.com>   2019-06-04 15:03:12 +0200
commit     a646ef398e72a2ac40bea974808ffcf1bea4e7f4 (patch)
tree       6799167289a134b5c6f7c3d4cf4e1e52165f46b4
parent     Linux 5.2-rc3 (diff)
s390/jump_label: replace stop_machine with smp_call_function
The use of stop_machine() to replace the mask bits of the jump label branch is a very heavy-weight operation. It is in fact not necessary: the mask of the branch can simply be updated, followed by a signal processor to all the other CPUs to force them to pick up the modified instruction.

Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
[heiko.carstens@de.ibm.com: change jump_label_make_nop() so we get "brcl 0,offset" instead of "brcl 0,0"; this makes sure that only the mask part of the instruction gets changed when it is updated]
Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
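Editor's note: the point of the "brcl 0,offset" nop is that the disabled and enabled encodings then differ only in the mask nibble, so even though s390_kernel_write() rewrites all six bytes, only the mask byte actually changes under the running CPUs. A minimal sketch of the RIL-format encoding of brcl, as plain userspace C; encode_brcl() is an illustrative helper, not kernel code:

#include <stdint.h>
#include <stdio.h>

/* brcl is a 6-byte RIL-format instruction: opcode byte 0xc0, the mask
 * in bits 8-11, the low opcode nibble 0x4 in bits 12-15, then a signed
 * 32-bit offset counted in halfwords. */
static void encode_brcl(uint8_t buf[6], uint8_t mask, int32_t off)
{
	buf[0] = 0xc0;
	buf[1] = (mask << 4) | 0x04;	/* 0x04: brcl 0,...  0xf4: brcl 15,... */
	buf[2] = off >> 24;
	buf[3] = off >> 16;
	buf[4] = off >> 8;
	buf[5] = off;
}

int main(void)
{
	uint8_t nop[6], branch[6];
	int32_t off = 0x1234;		/* hypothetical halfword offset */
	int i;

	encode_brcl(nop, 0x0, off);	/* brcl 0,offset: branch never taken */
	encode_brcl(branch, 0xf, off);	/* brcl 15,offset: branch always taken */
	for (i = 0; i < 6; i++)
		printf("byte %d: %02x vs %02x%s\n", i, nop[i], branch[i],
		       nop[i] != branch[i] ? "   <-- only difference" : "");
	return 0;
}

With the same offset in both encodings only the mask byte differs; with the old "brcl 0,0" nop the four offset bytes would have to change as well when the label is toggled.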
-rw-r--r--  arch/s390/kernel/jump_label.c  18
-rw-r--r--  arch/s390/mm/maccess.c          9
2 files changed, 10 insertions, 17 deletions
diff --git a/arch/s390/kernel/jump_label.c b/arch/s390/kernel/jump_label.c
index 3f10b56bd5a3..e193630a7d2a 100644
--- a/arch/s390/kernel/jump_label.c
+++ b/arch/s390/kernel/jump_label.c
@@ -22,9 +22,9 @@ struct insn_args {
 
 static void jump_label_make_nop(struct jump_entry *entry, struct insn *insn)
 {
-	/* brcl 0,0 */
+	/* brcl 0,offset */
 	insn->opcode = 0xc004;
-	insn->offset = 0;
+	insn->offset = (jump_entry_target(entry) - jump_entry_code(entry)) >> 1;
 }
 
 static void jump_label_make_branch(struct jump_entry *entry, struct insn *insn)
@@ -77,23 +77,15 @@ static void __jump_label_transform(struct jump_entry *entry,
 	s390_kernel_write(code, &new, sizeof(new));
 }
 
-static int __sm_arch_jump_label_transform(void *data)
+static void __jump_label_sync(void *dummy)
 {
-	struct insn_args *args = data;
-
-	__jump_label_transform(args->entry, args->type, 0);
-	return 0;
 }
 
 void arch_jump_label_transform(struct jump_entry *entry,
 			       enum jump_label_type type)
 {
-	struct insn_args args;
-
-	args.entry = entry;
-	args.type = type;
-
-	stop_machine_cpuslocked(__sm_arch_jump_label_transform, &args, NULL);
+	__jump_label_transform(entry, type, 0);
+	smp_call_function(__jump_label_sync, NULL, 1);
 }
 
 void arch_jump_label_transform_static(struct jump_entry *entry,
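Editor's note: the empty __jump_label_sync() callback is deliberate; the work is done by the interrupt itself, which forces each CPU to resynchronize instruction fetch and pick up the patched bytes, as the commit message describes. For orientation, a hedged sketch of how this path is reached from generic code; the static key and the two wrapper functions below are invented for illustration, while DEFINE_STATIC_KEY_FALSE(), static_key_enable() and static_branch_unlikely() are the real jump_label APIs:

#include <linux/jump_label.h>

static DEFINE_STATIC_KEY_FALSE(my_feature);	/* hypothetical key */

void my_feature_enable(void)
{
	/* The generic jump label code walks all branch sites of the key
	 * and calls arch_jump_label_transform() for each; on s390 this
	 * now patches the brcl mask and kicks the other CPUs instead of
	 * calling stop_machine(). */
	static_key_enable(&my_feature.key);
}

bool my_feature_active(void)
{
	return static_branch_unlikely(&my_feature);	/* compiles to a brcl */
}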
diff --git a/arch/s390/mm/maccess.c b/arch/s390/mm/maccess.c
index 818deeb1ebc3..1864a8bb9622 100644
--- a/arch/s390/mm/maccess.c
+++ b/arch/s390/mm/maccess.c
@@ -52,21 +52,22 @@ static notrace long s390_kernel_write_odd(void *dst, const void *src, size_t siz
  * Therefore we have a read-modify-write sequence: the function reads eight
  * bytes from destination at an eight byte boundary, modifies the bytes
  * requested and writes the result back in a loop.
- *
- * Note: this means that this function may not be called concurrently on
- * several cpus with overlapping words, since this may potentially
- * cause data corruption.
  */
+static DEFINE_SPINLOCK(s390_kernel_write_lock);
+
 void notrace s390_kernel_write(void *dst, const void *src, size_t size)
 {
+	unsigned long flags;
 	long copied;
 
+	spin_lock_irqsave(&s390_kernel_write_lock, flags);
 	while (size) {
 		copied = s390_kernel_write_odd(dst, src, size);
 		dst += copied;
 		src += copied;
 		size -= copied;
 	}
+	spin_unlock_irqrestore(&s390_kernel_write_lock, flags);
 }
 
 static int __memcpy_real(void *dest, void *src, size_t count)
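Editor's note: s390_kernel_write_odd() itself is not part of this hunk; in the kernel it ends with a privileged store so the write bypasses DAT write protection. The read-modify-write splitting that the comment above describes looks roughly like this hedged userspace sketch; write_odd_sketch() is an illustrative stand-in, not the kernel function:

#include <stdint.h>
#include <stddef.h>
#include <string.h>

/* Cover the bytes of dst that fall into one aligned 8-byte word:
 * read the word, splice in the requested bytes, write it back, and
 * return how many bytes were covered so the caller's loop advances. */
static long write_odd_sketch(void *dst, const void *src, size_t size)
{
	uintptr_t addr = (uintptr_t)dst;
	uintptr_t aligned = addr & ~(uintptr_t)7;
	size_t offset = addr - aligned;
	size_t n = 8 - offset;
	uint64_t word;

	if (n > size)
		n = size;
	memcpy(&word, (void *)aligned, 8);		/* read   */
	memcpy((uint8_t *)&word + offset, src, n);	/* modify */
	memcpy((void *)aligned, &word, 8);		/* write  */
	return (long)n;
}

Two CPUs splicing into overlapping eight-byte words could otherwise lose each other's bytes; with the new s390_kernel_write_lock serializing callers, the old warning comment about concurrent use no longer applies, which is why this patch can drop it.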