Diffstat (limited to 'arch/s390')
-rw-r--r--  arch/s390/hypfs/inode.c                  |  53
-rw-r--r--  arch/s390/include/asm/kvm_host.h         |  12
-rw-r--r--  arch/s390/include/asm/mmu_context.h      |   2
-rw-r--r--  arch/s390/include/asm/page.h             |  11
-rw-r--r--  arch/s390/include/asm/pci_io.h           |   1
-rw-r--r--  arch/s390/include/asm/pgtable.h          |   2
-rw-r--r--  arch/s390/include/asm/topology.h         |  24
-rw-r--r--  arch/s390/kernel/cache.c                 |  25
-rw-r--r--  arch/s390/kernel/early.c                 |  12
-rw-r--r--  arch/s390/kernel/jump_label.c            |  12
-rw-r--r--  arch/s390/kernel/module.c                |   1
-rw-r--r--  arch/s390/kernel/processor.c             |   2
-rw-r--r--  arch/s390/kernel/setup.c                 |   1
-rw-r--r--  arch/s390/kernel/smp.c                   |  54
-rw-r--r--  arch/s390/kernel/topology.c              | 134
-rw-r--r--  arch/s390/kernel/vdso64/clock_gettime.S  |   6
-rw-r--r--  arch/s390/kvm/kvm-s390.c                 |  69
-rw-r--r--  arch/s390/kvm/kvm-s390.h                 |   3
-rw-r--r--  arch/s390/kvm/priv.c                     |   2
-rw-r--r--  arch/s390/mm/mmap.c                      |   5
-rw-r--r--  arch/s390/pci/pci.c                      |  60
-rw-r--r--  arch/s390/pci/pci_mmio.c                 |  17
22 files changed, 249 insertions(+), 259 deletions(-)
diff --git a/arch/s390/hypfs/inode.c b/arch/s390/hypfs/inode.c
index ad66b07f742e..df7d8cbee377 100644
--- a/arch/s390/hypfs/inode.c
+++ b/arch/s390/hypfs/inode.c
@@ -74,7 +74,7 @@ static void hypfs_remove(struct dentry *dentry)
parent = dentry->d_parent;
mutex_lock(&parent->d_inode->i_mutex);
if (hypfs_positive(dentry)) {
- if (S_ISDIR(dentry->d_inode->i_mode))
+ if (d_is_dir(dentry))
simple_rmdir(parent->d_inode, dentry);
else
simple_unlink(parent->d_inode, dentry);
@@ -144,36 +144,32 @@ static int hypfs_open(struct inode *inode, struct file *filp)
return nonseekable_open(inode, filp);
}
-static ssize_t hypfs_aio_read(struct kiocb *iocb, const struct iovec *iov,
- unsigned long nr_segs, loff_t offset)
+static ssize_t hypfs_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
- char *data;
- ssize_t ret;
- struct file *filp = iocb->ki_filp;
- /* XXX: temporary */
- char __user *buf = iov[0].iov_base;
- size_t count = iov[0].iov_len;
-
- if (nr_segs != 1)
- return -EINVAL;
-
- data = filp->private_data;
- ret = simple_read_from_buffer(buf, count, &offset, data, strlen(data));
- if (ret <= 0)
- return ret;
+ struct file *file = iocb->ki_filp;
+ char *data = file->private_data;
+ size_t available = strlen(data);
+ loff_t pos = iocb->ki_pos;
+ size_t count;
- iocb->ki_pos += ret;
- file_accessed(filp);
-
- return ret;
+ if (pos < 0)
+ return -EINVAL;
+ if (pos >= available || !iov_iter_count(to))
+ return 0;
+ count = copy_to_iter(data + pos, available - pos, to);
+ if (!count)
+ return -EFAULT;
+ iocb->ki_pos = pos + count;
+ file_accessed(file);
+ return count;
}
-static ssize_t hypfs_aio_write(struct kiocb *iocb, const struct iovec *iov,
- unsigned long nr_segs, loff_t offset)
+
+static ssize_t hypfs_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
int rc;
struct super_block *sb = file_inode(iocb->ki_filp)->i_sb;
struct hypfs_sb_info *fs_info = sb->s_fs_info;
- size_t count = iov_length(iov, nr_segs);
+ size_t count = iov_iter_count(from);
/*
* Currently we only allow one update per second for two reasons:
@@ -202,6 +198,7 @@ static ssize_t hypfs_aio_write(struct kiocb *iocb, const struct iovec *iov,
}
hypfs_update_update(sb);
rc = count;
+ iov_iter_advance(from, count);
out:
mutex_unlock(&fs_info->lock);
return rc;
@@ -440,10 +437,10 @@ struct dentry *hypfs_create_str(struct dentry *dir,
static const struct file_operations hypfs_file_ops = {
.open = hypfs_open,
.release = hypfs_release,
- .read = do_sync_read,
- .write = do_sync_write,
- .aio_read = hypfs_aio_read,
- .aio_write = hypfs_aio_write,
+ .read = new_sync_read,
+ .write = new_sync_write,
+ .read_iter = hypfs_read_iter,
+ .write_iter = hypfs_write_iter,
.llseek = no_llseek,
};
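
Note: the hunks above convert hypfs from the old aio_read/aio_write interface, which rejected anything but a single iovec segment, to iov_iter based read_iter/write_iter handlers wired up through new_sync_read/new_sync_write. A minimal userspace sketch of what this enables, assuming a mounted hypfs; the file path is hypothetical:

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/uio.h>

int main(void)
{
	char head[4], tail[60];
	struct iovec iov[2] = {
		{ .iov_base = head, .iov_len = sizeof(head) },
		{ .iov_base = tail, .iov_len = sizeof(tail) },
	};
	int fd = open("/sys/hypervisor/cpus/count", O_RDONLY);	/* hypothetical path */
	ssize_t n;

	if (fd < 0)
		return 1;
	n = readv(fd, iov, 2);	/* one call, data split across both segments */
	printf("read %zd bytes\n", n);
	close(fd);
	return 0;
}
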
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index d84559e31f32..f407bbf5ee94 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -515,15 +515,15 @@ struct s390_io_adapter {
#define S390_ARCH_FAC_MASK_SIZE_U64 \
(S390_ARCH_FAC_MASK_SIZE_BYTE / sizeof(u64))
-struct s390_model_fac {
- /* facilities used in SIE context */
- __u64 sie[S390_ARCH_FAC_LIST_SIZE_U64];
- /* subset enabled by kvm */
- __u64 kvm[S390_ARCH_FAC_LIST_SIZE_U64];
+struct kvm_s390_fac {
+ /* facility list requested by guest */
+ __u64 list[S390_ARCH_FAC_LIST_SIZE_U64];
+ /* facility mask supported by kvm & hosting machine */
+ __u64 mask[S390_ARCH_FAC_LIST_SIZE_U64];
};
struct kvm_s390_cpu_model {
- struct s390_model_fac *fac;
+ struct kvm_s390_fac *fac;
struct cpuid cpu_id;
unsigned short ibc;
};
diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h
index f49b71954654..8fb3802f8fad 100644
--- a/arch/s390/include/asm/mmu_context.h
+++ b/arch/s390/include/asm/mmu_context.h
@@ -62,6 +62,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
{
int cpu = smp_processor_id();
+ S390_lowcore.user_asce = next->context.asce_bits | __pa(next->pgd);
if (prev == next)
return;
if (MACHINE_HAS_TLB_LC)
@@ -73,7 +74,6 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
atomic_dec(&prev->context.attach_count);
if (MACHINE_HAS_TLB_LC)
cpumask_clear_cpu(cpu, &prev->context.cpu_attach_mask);
- S390_lowcore.user_asce = next->context.asce_bits | __pa(next->pgd);
}
#define finish_arch_post_lock_switch finish_arch_post_lock_switch
diff --git a/arch/s390/include/asm/page.h b/arch/s390/include/asm/page.h
index 7b2ac6e44166..53eacbd4f09b 100644
--- a/arch/s390/include/asm/page.h
+++ b/arch/s390/include/asm/page.h
@@ -37,16 +37,7 @@ static inline void storage_key_init_range(unsigned long start, unsigned long end
#endif
}
-static inline void clear_page(void *page)
-{
- register unsigned long reg1 asm ("1") = 0;
- register void *reg2 asm ("2") = page;
- register unsigned long reg3 asm ("3") = 4096;
- asm volatile(
- " mvcl 2,0"
- : "+d" (reg2), "+d" (reg3) : "d" (reg1)
- : "memory", "cc");
-}
+#define clear_page(page) memset((page), 0, PAGE_SIZE)
/*
* copy_page uses the mvcl instruction with 0xb0 padding byte in order to
diff --git a/arch/s390/include/asm/pci_io.h b/arch/s390/include/asm/pci_io.h
index f664e96f48c7..1a9a98de5bde 100644
--- a/arch/s390/include/asm/pci_io.h
+++ b/arch/s390/include/asm/pci_io.h
@@ -16,6 +16,7 @@
struct zpci_iomap_entry {
u32 fh;
u8 bar;
+ u16 count;
};
extern struct zpci_iomap_entry *zpci_iomap_start;
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index fbb5ee3ae57c..e08ec38f8c6e 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -91,7 +91,9 @@ extern unsigned long zero_page_mask;
*/
#define PTRS_PER_PTE 256
#ifndef CONFIG_64BIT
+#define __PAGETABLE_PUD_FOLDED
#define PTRS_PER_PMD 1
+#define __PAGETABLE_PMD_FOLDED
#define PTRS_PER_PUD 1
#else /* CONFIG_64BIT */
#define PTRS_PER_PMD 2048
diff --git a/arch/s390/include/asm/topology.h b/arch/s390/include/asm/topology.h
index c4fbb9527c5c..b1453a2ae1ca 100644
--- a/arch/s390/include/asm/topology.h
+++ b/arch/s390/include/asm/topology.h
@@ -18,15 +18,15 @@ struct cpu_topology_s390 {
cpumask_t book_mask;
};
-extern struct cpu_topology_s390 cpu_topology[NR_CPUS];
+DECLARE_PER_CPU(struct cpu_topology_s390, cpu_topology);
-#define topology_physical_package_id(cpu) (cpu_topology[cpu].socket_id)
-#define topology_thread_id(cpu) (cpu_topology[cpu].thread_id)
-#define topology_thread_cpumask(cpu) (&cpu_topology[cpu].thread_mask)
-#define topology_core_id(cpu) (cpu_topology[cpu].core_id)
-#define topology_core_cpumask(cpu) (&cpu_topology[cpu].core_mask)
-#define topology_book_id(cpu) (cpu_topology[cpu].book_id)
-#define topology_book_cpumask(cpu) (&cpu_topology[cpu].book_mask)
+#define topology_physical_package_id(cpu) (per_cpu(cpu_topology, cpu).socket_id)
+#define topology_thread_id(cpu) (per_cpu(cpu_topology, cpu).thread_id)
+#define topology_thread_cpumask(cpu) (&per_cpu(cpu_topology, cpu).thread_mask)
+#define topology_core_id(cpu) (per_cpu(cpu_topology, cpu).core_id)
+#define topology_core_cpumask(cpu) (&per_cpu(cpu_topology, cpu).core_mask)
+#define topology_book_id(cpu) (per_cpu(cpu_topology, cpu).book_id)
+#define topology_book_cpumask(cpu) (&per_cpu(cpu_topology, cpu).book_mask)
#define mc_capable() 1
@@ -51,14 +51,6 @@ static inline void topology_expect_change(void) { }
#define POLARIZATION_VM (2)
#define POLARIZATION_VH (3)
-#ifdef CONFIG_SCHED_BOOK
-void s390_init_cpu_topology(void);
-#else
-static inline void s390_init_cpu_topology(void)
-{
-};
-#endif
-
#include <asm-generic/topology.h>
#endif /* _ASM_S390_TOPOLOGY_H */
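
Note: with cpu_topology converted to a per-cpu variable, consumers read it through per_cpu() instead of indexing a NR_CPUS array. A minimal sketch of the consumer side, using only the accessor macros defined above (illustrative, not part of the patch):

#include <linux/cpumask.h>
#include <linux/printk.h>
#include <asm/topology.h>

static void dump_cpu_placement(void)
{
	int cpu;

	/* Walk the online CPUs and print where each one sits. */
	for_each_online_cpu(cpu)
		pr_info("cpu %d: socket %d core %d book %d\n", cpu,
			topology_physical_package_id(cpu),
			topology_core_id(cpu),
			topology_book_id(cpu));
}
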
diff --git a/arch/s390/kernel/cache.c b/arch/s390/kernel/cache.c
index 632fa06ea162..0969d113b3d6 100644
--- a/arch/s390/kernel/cache.c
+++ b/arch/s390/kernel/cache.c
@@ -91,12 +91,9 @@ static inline enum cache_type get_cache_type(struct cache_info *ci, int level)
{
if (level >= CACHE_MAX_LEVEL)
return CACHE_TYPE_NOCACHE;
-
ci += level;
-
if (ci->scope != CACHE_SCOPE_SHARED && ci->scope != CACHE_SCOPE_PRIVATE)
return CACHE_TYPE_NOCACHE;
-
return cache_type_map[ci->type];
}
@@ -111,23 +108,19 @@ static inline unsigned long ecag(int ai, int li, int ti)
}
static void ci_leaf_init(struct cacheinfo *this_leaf, int private,
- enum cache_type type, unsigned int level)
+ enum cache_type type, unsigned int level, int cpu)
{
int ti, num_sets;
- int cpu = smp_processor_id();
if (type == CACHE_TYPE_INST)
ti = CACHE_TI_INSTRUCTION;
else
ti = CACHE_TI_UNIFIED;
-
this_leaf->level = level + 1;
this_leaf->type = type;
this_leaf->coherency_line_size = ecag(EXTRACT_LINE_SIZE, level, ti);
- this_leaf->ways_of_associativity = ecag(EXTRACT_ASSOCIATIVITY,
- level, ti);
+ this_leaf->ways_of_associativity = ecag(EXTRACT_ASSOCIATIVITY, level, ti);
this_leaf->size = ecag(EXTRACT_SIZE, level, ti);
-
num_sets = this_leaf->size / this_leaf->coherency_line_size;
num_sets /= this_leaf->ways_of_associativity;
this_leaf->number_of_sets = num_sets;
@@ -145,7 +138,6 @@ int init_cache_level(unsigned int cpu)
if (!this_cpu_ci)
return -EINVAL;
-
ct.raw = ecag(EXTRACT_TOPOLOGY, 0, 0);
do {
ctype = get_cache_type(&ct.ci[0], level);
@@ -154,34 +146,31 @@ int init_cache_level(unsigned int cpu)
/* Separate instruction and data caches */
leaves += (ctype == CACHE_TYPE_SEPARATE) ? 2 : 1;
} while (++level < CACHE_MAX_LEVEL);
-
this_cpu_ci->num_levels = level;
this_cpu_ci->num_leaves = leaves;
-
return 0;
}
int populate_cache_leaves(unsigned int cpu)
{
+ struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
+ struct cacheinfo *this_leaf = this_cpu_ci->info_list;
unsigned int level, idx, pvt;
union cache_topology ct;
enum cache_type ctype;
- struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
- struct cacheinfo *this_leaf = this_cpu_ci->info_list;
ct.raw = ecag(EXTRACT_TOPOLOGY, 0, 0);
for (idx = 0, level = 0; level < this_cpu_ci->num_levels &&
idx < this_cpu_ci->num_leaves; idx++, level++) {
if (!this_leaf)
return -EINVAL;
-
pvt = (ct.ci[level].scope == CACHE_SCOPE_PRIVATE) ? 1 : 0;
ctype = get_cache_type(&ct.ci[0], level);
if (ctype == CACHE_TYPE_SEPARATE) {
- ci_leaf_init(this_leaf++, pvt, CACHE_TYPE_DATA, level);
- ci_leaf_init(this_leaf++, pvt, CACHE_TYPE_INST, level);
+ ci_leaf_init(this_leaf++, pvt, CACHE_TYPE_DATA, level, cpu);
+ ci_leaf_init(this_leaf++, pvt, CACHE_TYPE_INST, level, cpu);
} else {
- ci_leaf_init(this_leaf++, pvt, ctype, level);
+ ci_leaf_init(this_leaf++, pvt, ctype, level, cpu);
}
}
return 0;
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index 70a329450901..4427ab7ac23a 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -393,17 +393,19 @@ static __init void detect_machine_facilities(void)
S390_lowcore.machine_flags |= MACHINE_FLAG_TLB_LC;
if (test_facility(129))
S390_lowcore.machine_flags |= MACHINE_FLAG_VX;
- if (test_facility(128))
- S390_lowcore.machine_flags |= MACHINE_FLAG_CAD;
#endif
}
-static int __init nocad_setup(char *str)
+static int __init cad_setup(char *str)
{
- S390_lowcore.machine_flags &= ~MACHINE_FLAG_CAD;
+ int val;
+
+ get_option(&str, &val);
+ if (val && test_facility(128))
+ S390_lowcore.machine_flags |= MACHINE_FLAG_CAD;
return 0;
}
-early_param("nocad", nocad_setup);
+early_param("cad", cad_setup);
static int __init cad_init(void)
{
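
Note: the "nocad" switch becomes an integer-valued "cad=" parameter parsed with get_option(). The same pattern, reduced to a generic sketch with a hypothetical "demo=" parameter:

#include <linux/init.h>
#include <linux/kernel.h>

static int demo_flag __initdata;

/* Hypothetical "demo=<n>" early parameter, shown only to illustrate the pattern. */
static int __init demo_setup(char *str)
{
	int val;

	get_option(&str, &val);		/* parse the integer after "demo=" */
	demo_flag = !!val;
	return 0;
}
early_param("demo", demo_setup);
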
diff --git a/arch/s390/kernel/jump_label.c b/arch/s390/kernel/jump_label.c
index cb2d51e779df..830066f936c8 100644
--- a/arch/s390/kernel/jump_label.c
+++ b/arch/s390/kernel/jump_label.c
@@ -36,16 +36,20 @@ static void jump_label_make_branch(struct jump_entry *entry, struct insn *insn)
insn->offset = (entry->target - entry->code) >> 1;
}
-static void jump_label_bug(struct jump_entry *entry, struct insn *insn)
+static void jump_label_bug(struct jump_entry *entry, struct insn *expected,
+ struct insn *new)
{
unsigned char *ipc = (unsigned char *)entry->code;
- unsigned char *ipe = (unsigned char *)insn;
+ unsigned char *ipe = (unsigned char *)expected;
+ unsigned char *ipn = (unsigned char *)new;
pr_emerg("Jump label code mismatch at %pS [%p]\n", ipc, ipc);
pr_emerg("Found: %02x %02x %02x %02x %02x %02x\n",
ipc[0], ipc[1], ipc[2], ipc[3], ipc[4], ipc[5]);
pr_emerg("Expected: %02x %02x %02x %02x %02x %02x\n",
ipe[0], ipe[1], ipe[2], ipe[3], ipe[4], ipe[5]);
+ pr_emerg("New: %02x %02x %02x %02x %02x %02x\n",
+ ipn[0], ipn[1], ipn[2], ipn[3], ipn[4], ipn[5]);
panic("Corrupted kernel text");
}
@@ -69,10 +73,10 @@ static void __jump_label_transform(struct jump_entry *entry,
}
if (init) {
if (memcmp((void *)entry->code, &orignop, sizeof(orignop)))
- jump_label_bug(entry, &old);
+ jump_label_bug(entry, &orignop, &new);
} else {
if (memcmp((void *)entry->code, &old, sizeof(old)))
- jump_label_bug(entry, &old);
+ jump_label_bug(entry, &old, &new);
}
probe_kernel_write((void *)entry->code, &new, sizeof(new));
}
diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
index 36154a2f1814..2ca95862e336 100644
--- a/arch/s390/kernel/module.c
+++ b/arch/s390/kernel/module.c
@@ -436,6 +436,7 @@ int module_finalize(const Elf_Ehdr *hdr,
const Elf_Shdr *sechdrs,
struct module *me)
{
+ jump_label_apply_nops(me);
vfree(me->arch.syminfo);
me->arch.syminfo = NULL;
return 0;
diff --git a/arch/s390/kernel/processor.c b/arch/s390/kernel/processor.c
index 26108232fcaa..dc488e13b7e3 100644
--- a/arch/s390/kernel/processor.c
+++ b/arch/s390/kernel/processor.c
@@ -18,7 +18,7 @@
static DEFINE_PER_CPU(struct cpuid, cpu_id);
-void cpu_relax(void)
+void notrace cpu_relax(void)
{
if (!smp_cpu_mtid && MACHINE_HAS_DIAG44)
asm volatile("diag 0,0,0x44");
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index bfac77ada4f2..a5ea8bc17cb3 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -909,7 +909,6 @@ void __init setup_arch(char **cmdline_p)
setup_lowcore();
smp_fill_possible_mask();
cpu_init();
- s390_init_cpu_topology();
/*
* Setup capabilities (ELF_HWCAP & ELF_PLATFORM).
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index a668993ff577..db8f1115a3bf 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -59,14 +59,13 @@ enum {
CPU_STATE_CONFIGURED,
};
+static DEFINE_PER_CPU(struct cpu *, cpu_device);
+
struct pcpu {
- struct cpu *cpu;
struct _lowcore *lowcore; /* lowcore page(s) for the cpu */
- unsigned long async_stack; /* async stack for the cpu */
- unsigned long panic_stack; /* panic stack for the cpu */
unsigned long ec_mask; /* bit mask for ec_xxx functions */
- int state; /* physical cpu state */
- int polarization; /* physical polarization */
+ signed char state; /* physical cpu state */
+ signed char polarization; /* physical polarization */
u16 address; /* physical cpu address */
};
@@ -173,25 +172,30 @@ static void pcpu_ec_call(struct pcpu *pcpu, int ec_bit)
pcpu_sigp_retry(pcpu, order, 0);
}
+#define ASYNC_FRAME_OFFSET (ASYNC_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE)
+#define PANIC_FRAME_OFFSET (PAGE_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE)
+
static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)
{
+ unsigned long async_stack, panic_stack;
struct _lowcore *lc;
if (pcpu != &pcpu_devices[0]) {
pcpu->lowcore = (struct _lowcore *)
__get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER);
- pcpu->async_stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER);
- pcpu->panic_stack = __get_free_page(GFP_KERNEL);
- if (!pcpu->lowcore || !pcpu->panic_stack || !pcpu->async_stack)
+ async_stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER);
+ panic_stack = __get_free_page(GFP_KERNEL);
+ if (!pcpu->lowcore || !panic_stack || !async_stack)
goto out;
+ } else {
+ async_stack = pcpu->lowcore->async_stack - ASYNC_FRAME_OFFSET;
+ panic_stack = pcpu->lowcore->panic_stack - PANIC_FRAME_OFFSET;
}
lc = pcpu->lowcore;
memcpy(lc, &S390_lowcore, 512);
memset((char *) lc + 512, 0, sizeof(*lc) - 512);
- lc->async_stack = pcpu->async_stack + ASYNC_SIZE
- - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
- lc->panic_stack = pcpu->panic_stack + PAGE_SIZE
- - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
+ lc->async_stack = async_stack + ASYNC_FRAME_OFFSET;
+ lc->panic_stack = panic_stack + PANIC_FRAME_OFFSET;
lc->cpu_nr = cpu;
lc->spinlock_lockval = arch_spin_lockval(cpu);
#ifndef CONFIG_64BIT
@@ -212,8 +216,8 @@ static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)
return 0;
out:
if (pcpu != &pcpu_devices[0]) {
- free_page(pcpu->panic_stack);
- free_pages(pcpu->async_stack, ASYNC_ORDER);
+ free_page(panic_stack);
+ free_pages(async_stack, ASYNC_ORDER);
free_pages((unsigned long) pcpu->lowcore, LC_ORDER);
}
return -ENOMEM;
@@ -235,11 +239,11 @@ static void pcpu_free_lowcore(struct pcpu *pcpu)
#else
vdso_free_per_cpu(pcpu->lowcore);
#endif
- if (pcpu != &pcpu_devices[0]) {
- free_page(pcpu->panic_stack);
- free_pages(pcpu->async_stack, ASYNC_ORDER);
- free_pages((unsigned long) pcpu->lowcore, LC_ORDER);
- }
+ if (pcpu == &pcpu_devices[0])
+ return;
+ free_page(pcpu->lowcore->panic_stack-PANIC_FRAME_OFFSET);
+ free_pages(pcpu->lowcore->async_stack-ASYNC_FRAME_OFFSET, ASYNC_ORDER);
+ free_pages((unsigned long) pcpu->lowcore, LC_ORDER);
}
#endif /* CONFIG_HOTPLUG_CPU */
@@ -366,7 +370,8 @@ void smp_call_online_cpu(void (*func)(void *), void *data)
void smp_call_ipl_cpu(void (*func)(void *), void *data)
{
pcpu_delegate(&pcpu_devices[0], func, data,
- pcpu_devices->panic_stack + PAGE_SIZE);
+ pcpu_devices->lowcore->panic_stack -
+ PANIC_FRAME_OFFSET + PAGE_SIZE);
}
int smp_find_processor_id(u16 address)
@@ -935,10 +940,6 @@ void __init smp_prepare_boot_cpu(void)
pcpu->state = CPU_STATE_CONFIGURED;
pcpu->address = stap();
pcpu->lowcore = (struct _lowcore *)(unsigned long) store_prefix();
- pcpu->async_stack = S390_lowcore.async_stack - ASYNC_SIZE
- + STACK_FRAME_OVERHEAD + sizeof(struct pt_regs);
- pcpu->panic_stack = S390_lowcore.panic_stack - PAGE_SIZE
- + STACK_FRAME_OVERHEAD + sizeof(struct pt_regs);
S390_lowcore.percpu_offset = __per_cpu_offset[0];
smp_cpu_set_polarization(0, POLARIZATION_UNKNOWN);
set_cpu_present(0, true);
@@ -1078,8 +1079,7 @@ static int smp_cpu_notify(struct notifier_block *self, unsigned long action,
void *hcpu)
{
unsigned int cpu = (unsigned int)(long)hcpu;
- struct cpu *c = pcpu_devices[cpu].cpu;
- struct device *s = &c->dev;
+ struct device *s = &per_cpu(cpu_device, cpu)->dev;
int err = 0;
switch (action & ~CPU_TASKS_FROZEN) {
@@ -1102,7 +1102,7 @@ static int smp_add_present_cpu(int cpu)
c = kzalloc(sizeof(*c), GFP_KERNEL);
if (!c)
return -ENOMEM;
- pcpu_devices[cpu].cpu = c;
+ per_cpu(cpu_device, cpu) = c;
s = &c->dev;
c->hotpluggable = 1;
rc = register_cpu(c, cpu);
diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c
index 24ee33f1af24..14da43b801d9 100644
--- a/arch/s390/kernel/topology.c
+++ b/arch/s390/kernel/topology.c
@@ -7,14 +7,14 @@
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/workqueue.h>
-#include <linux/bootmem.h>
#include <linux/cpuset.h>
#include <linux/device.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/sched.h>
-#include <linux/init.h>
#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/mm.h>
@@ -42,8 +42,8 @@ static DEFINE_SPINLOCK(topology_lock);
static struct mask_info socket_info;
static struct mask_info book_info;
-struct cpu_topology_s390 cpu_topology[NR_CPUS];
-EXPORT_SYMBOL_GPL(cpu_topology);
+DEFINE_PER_CPU(struct cpu_topology_s390, cpu_topology);
+EXPORT_PER_CPU_SYMBOL_GPL(cpu_topology);
static cpumask_t cpu_group_map(struct mask_info *info, unsigned int cpu)
{
@@ -90,15 +90,15 @@ static struct mask_info *add_cpus_to_mask(struct topology_core *tl_core,
if (lcpu < 0)
continue;
for (i = 0; i <= smp_cpu_mtid; i++) {
- cpu_topology[lcpu + i].book_id = book->id;
- cpu_topology[lcpu + i].core_id = rcore;
- cpu_topology[lcpu + i].thread_id = lcpu + i;
+ per_cpu(cpu_topology, lcpu + i).book_id = book->id;
+ per_cpu(cpu_topology, lcpu + i).core_id = rcore;
+ per_cpu(cpu_topology, lcpu + i).thread_id = lcpu + i;
cpumask_set_cpu(lcpu + i, &book->mask);
cpumask_set_cpu(lcpu + i, &socket->mask);
if (one_socket_per_cpu)
- cpu_topology[lcpu + i].socket_id = rcore;
+ per_cpu(cpu_topology, lcpu + i).socket_id = rcore;
else
- cpu_topology[lcpu + i].socket_id = socket->id;
+ per_cpu(cpu_topology, lcpu + i).socket_id = socket->id;
smp_cpu_set_polarization(lcpu + i, tl_core->pp);
}
if (one_socket_per_cpu)
@@ -249,14 +249,14 @@ static void update_cpu_masks(void)
spin_lock_irqsave(&topology_lock, flags);
for_each_possible_cpu(cpu) {
- cpu_topology[cpu].thread_mask = cpu_thread_map(cpu);
- cpu_topology[cpu].core_mask = cpu_group_map(&socket_info, cpu);
- cpu_topology[cpu].book_mask = cpu_group_map(&book_info, cpu);
+ per_cpu(cpu_topology, cpu).thread_mask = cpu_thread_map(cpu);
+ per_cpu(cpu_topology, cpu).core_mask = cpu_group_map(&socket_info, cpu);
+ per_cpu(cpu_topology, cpu).book_mask = cpu_group_map(&book_info, cpu);
if (!MACHINE_HAS_TOPOLOGY) {
- cpu_topology[cpu].thread_id = cpu;
- cpu_topology[cpu].core_id = cpu;
- cpu_topology[cpu].socket_id = cpu;
- cpu_topology[cpu].book_id = cpu;
+ per_cpu(cpu_topology, cpu).thread_id = cpu;
+ per_cpu(cpu_topology, cpu).core_id = cpu;
+ per_cpu(cpu_topology, cpu).socket_id = cpu;
+ per_cpu(cpu_topology, cpu).book_id = cpu;
}
}
spin_unlock_irqrestore(&topology_lock, flags);
@@ -334,50 +334,6 @@ void topology_expect_change(void)
set_topology_timer();
}
-static int __init early_parse_topology(char *p)
-{
- if (strncmp(p, "off", 3))
- return 0;
- topology_enabled = 0;
- return 0;
-}
-early_param("topology", early_parse_topology);
-
-static void __init alloc_masks(struct sysinfo_15_1_x *info,
- struct mask_info *mask, int offset)
-{
- int i, nr_masks;
-
- nr_masks = info->mag[TOPOLOGY_NR_MAG - offset];
- for (i = 0; i < info->mnest - offset; i++)
- nr_masks *= info->mag[TOPOLOGY_NR_MAG - offset - 1 - i];
- nr_masks = max(nr_masks, 1);
- for (i = 0; i < nr_masks; i++) {
- mask->next = alloc_bootmem_align(
- roundup_pow_of_two(sizeof(struct mask_info)),
- roundup_pow_of_two(sizeof(struct mask_info)));
- mask = mask->next;
- }
-}
-
-void __init s390_init_cpu_topology(void)
-{
- struct sysinfo_15_1_x *info;
- int i;
-
- if (!MACHINE_HAS_TOPOLOGY)
- return;
- tl_info = alloc_bootmem_pages(PAGE_SIZE);
- info = tl_info;
- store_topology(info);
- pr_info("The CPU configuration topology of the machine is:");
- for (i = 0; i < TOPOLOGY_NR_MAG; i++)
- printk(KERN_CONT " %d", info->mag[i]);
- printk(KERN_CONT " / %d\n", info->mnest);
- alloc_masks(info, &socket_info, 1);
- alloc_masks(info, &book_info, 2);
-}
-
static int cpu_management;
static ssize_t dispatching_show(struct device *dev,
@@ -467,20 +423,29 @@ int topology_cpu_init(struct cpu *cpu)
const struct cpumask *cpu_thread_mask(int cpu)
{
- return &cpu_topology[cpu].thread_mask;
+ return &per_cpu(cpu_topology, cpu).thread_mask;
}
const struct cpumask *cpu_coregroup_mask(int cpu)
{
- return &cpu_topology[cpu].core_mask;
+ return &per_cpu(cpu_topology, cpu).core_mask;
}
static const struct cpumask *cpu_book_mask(int cpu)
{
- return &cpu_topology[cpu].book_mask;
+ return &per_cpu(cpu_topology, cpu).book_mask;
}
+static int __init early_parse_topology(char *p)
+{
+ if (strncmp(p, "off", 3))
+ return 0;
+ topology_enabled = 0;
+ return 0;
+}
+early_param("topology", early_parse_topology);
+
static struct sched_domain_topology_level s390_topology[] = {
{ cpu_thread_mask, cpu_smt_flags, SD_INIT_NAME(SMT) },
{ cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) },
@@ -489,6 +454,42 @@ static struct sched_domain_topology_level s390_topology[] = {
{ NULL, },
};
+static void __init alloc_masks(struct sysinfo_15_1_x *info,
+ struct mask_info *mask, int offset)
+{
+ int i, nr_masks;
+
+ nr_masks = info->mag[TOPOLOGY_NR_MAG - offset];
+ for (i = 0; i < info->mnest - offset; i++)
+ nr_masks *= info->mag[TOPOLOGY_NR_MAG - offset - 1 - i];
+ nr_masks = max(nr_masks, 1);
+ for (i = 0; i < nr_masks; i++) {
+ mask->next = kzalloc(sizeof(*mask->next), GFP_KERNEL);
+ mask = mask->next;
+ }
+}
+
+static int __init s390_topology_init(void)
+{
+ struct sysinfo_15_1_x *info;
+ int i;
+
+ if (!MACHINE_HAS_TOPOLOGY)
+ return 0;
+ tl_info = (struct sysinfo_15_1_x *)__get_free_page(GFP_KERNEL);
+ info = tl_info;
+ store_topology(info);
+ pr_info("The CPU configuration topology of the machine is:");
+ for (i = 0; i < TOPOLOGY_NR_MAG; i++)
+ printk(KERN_CONT " %d", info->mag[i]);
+ printk(KERN_CONT " / %d\n", info->mnest);
+ alloc_masks(info, &socket_info, 1);
+ alloc_masks(info, &book_info, 2);
+ set_sched_topology(s390_topology);
+ return 0;
+}
+early_initcall(s390_topology_init);
+
static int __init topology_init(void)
{
if (MACHINE_HAS_TOPOLOGY)
@@ -498,10 +499,3 @@ static int __init topology_init(void)
return device_create_file(cpu_subsys.dev_root, &dev_attr_dispatching);
}
device_initcall(topology_init);
-
-static int __init early_topology_init(void)
-{
- set_sched_topology(s390_topology);
- return 0;
-}
-early_initcall(early_topology_init);
diff --git a/arch/s390/kernel/vdso64/clock_gettime.S b/arch/s390/kernel/vdso64/clock_gettime.S
index 7699e735ae28..61541fb93dc6 100644
--- a/arch/s390/kernel/vdso64/clock_gettime.S
+++ b/arch/s390/kernel/vdso64/clock_gettime.S
@@ -25,9 +25,7 @@ __kernel_clock_gettime:
je 4f
cghi %r2,__CLOCK_REALTIME
je 5f
- cghi %r2,__CLOCK_THREAD_CPUTIME_ID
- je 9f
- cghi %r2,-2 /* Per-thread CPUCLOCK with PID=0, VIRT=1 */
+ cghi %r2,-3 /* Per-thread CPUCLOCK with PID=0, VIRT=1 */
je 9f
cghi %r2,__CLOCK_MONOTONIC_COARSE
je 3f
@@ -106,7 +104,7 @@ __kernel_clock_gettime:
aghi %r15,16
br %r14
- /* CLOCK_THREAD_CPUTIME_ID for this thread */
+ /* CPUCLOCK_VIRT for this thread */
9: icm %r0,15,__VDSO_ECTG_OK(%r5)
jz 12f
ear %r2,%a4
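
Note: the -3 checked in the fast path is the kernel's CPU-clock encoding for "calling thread (pid 0), virtual CPU time". A small userspace sketch of the arithmetic; the constants are reproduced here for illustration rather than pulled from a header:

#include <stdio.h>

#define CPUCLOCK_VIRT			1
#define CPUCLOCK_PERTHREAD_MASK		4

int main(void)
{
	unsigned int pid = 0;	/* 0 means "the calling thread" */
	int clockid = (int)((~pid << 3) | CPUCLOCK_PERTHREAD_MASK | CPUCLOCK_VIRT);

	printf("%d\n", clockid);	/* prints -3 */
	return 0;
}
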
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 0c3623927563..19e17bd7aec0 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -165,7 +165,6 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
case KVM_CAP_ONE_REG:
case KVM_CAP_ENABLE_CAP:
case KVM_CAP_S390_CSS_SUPPORT:
- case KVM_CAP_IRQFD:
case KVM_CAP_IOEVENTFD:
case KVM_CAP_DEVICE_CTRL:
case KVM_CAP_ENABLE_CAP_VM:
@@ -522,7 +521,7 @@ static int kvm_s390_set_processor(struct kvm *kvm, struct kvm_device_attr *attr)
memcpy(&kvm->arch.model.cpu_id, &proc->cpuid,
sizeof(struct cpuid));
kvm->arch.model.ibc = proc->ibc;
- memcpy(kvm->arch.model.fac->kvm, proc->fac_list,
+ memcpy(kvm->arch.model.fac->list, proc->fac_list,
S390_ARCH_FAC_LIST_SIZE_BYTE);
} else
ret = -EFAULT;
@@ -556,7 +555,7 @@ static int kvm_s390_get_processor(struct kvm *kvm, struct kvm_device_attr *attr)
}
memcpy(&proc->cpuid, &kvm->arch.model.cpu_id, sizeof(struct cpuid));
proc->ibc = kvm->arch.model.ibc;
- memcpy(&proc->fac_list, kvm->arch.model.fac->kvm, S390_ARCH_FAC_LIST_SIZE_BYTE);
+ memcpy(&proc->fac_list, kvm->arch.model.fac->list, S390_ARCH_FAC_LIST_SIZE_BYTE);
if (copy_to_user((void __user *)attr->addr, proc, sizeof(*proc)))
ret = -EFAULT;
kfree(proc);
@@ -576,10 +575,10 @@ static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr)
}
get_cpu_id((struct cpuid *) &mach->cpuid);
mach->ibc = sclp_get_ibc();
- memcpy(&mach->fac_mask, kvm_s390_fac_list_mask,
- kvm_s390_fac_list_mask_size() * sizeof(u64));
+ memcpy(&mach->fac_mask, kvm->arch.model.fac->mask,
+ S390_ARCH_FAC_LIST_SIZE_BYTE);
memcpy((unsigned long *)&mach->fac_list, S390_lowcore.stfle_fac_list,
- S390_ARCH_FAC_LIST_SIZE_U64);
+ S390_ARCH_FAC_LIST_SIZE_BYTE);
if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach)))
ret = -EFAULT;
kfree(mach);
@@ -778,15 +777,18 @@ long kvm_arch_vm_ioctl(struct file *filp,
static int kvm_s390_query_ap_config(u8 *config)
{
u32 fcn_code = 0x04000000UL;
- u32 cc;
+ u32 cc = 0;
+ memset(config, 0, 128);
asm volatile(
"lgr 0,%1\n"
"lgr 2,%2\n"
".long 0xb2af0000\n" /* PQAP(QCI) */
- "ipm %0\n"
+ "0: ipm %0\n"
"srl %0,28\n"
- : "=r" (cc)
+ "1:\n"
+ EX_TABLE(0b, 1b)
+ : "+r" (cc)
: "r" (fcn_code), "r" (config)
: "cc", "0", "2", "memory"
);
@@ -839,9 +841,13 @@ static int kvm_s390_crypto_init(struct kvm *kvm)
kvm_s390_set_crycb_format(kvm);
- /* Disable AES/DEA protected key functions by default */
- kvm->arch.crypto.aes_kw = 0;
- kvm->arch.crypto.dea_kw = 0;
+ /* Enable AES/DEA protected key functions by default */
+ kvm->arch.crypto.aes_kw = 1;
+ kvm->arch.crypto.dea_kw = 1;
+ get_random_bytes(kvm->arch.crypto.crycb->aes_wrapping_key_mask,
+ sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
+ get_random_bytes(kvm->arch.crypto.crycb->dea_wrapping_key_mask,
+ sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
return 0;
}
@@ -886,40 +892,29 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
/*
* The architectural maximum amount of facilities is 16 kbit. To store
* this amount, 2 kbyte of memory is required. Thus we need a full
- * page to hold the active copy (arch.model.fac->sie) and the current
- * facilities set (arch.model.fac->kvm). Its address size has to be
+ * page to hold the guest facility list (arch.model.fac->list) and the
+ * facility mask (arch.model.fac->mask). Its address size has to be
* 31 bits and word aligned.
*/
kvm->arch.model.fac =
- (struct s390_model_fac *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
+ (struct kvm_s390_fac *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
if (!kvm->arch.model.fac)
goto out_nofac;
- memcpy(kvm->arch.model.fac->kvm, S390_lowcore.stfle_fac_list,
- S390_ARCH_FAC_LIST_SIZE_U64);
-
- /*
- * If this KVM host runs *not* in a LPAR, relax the facility bits
- * of the kvm facility mask by all missing facilities. This will allow
- * to determine the right CPU model by means of the remaining facilities.
- * Live guest migration must prohibit the migration of KVMs running in
- * a LPAR to non LPAR hosts.
- */
- if (!MACHINE_IS_LPAR)
- for (i = 0; i < kvm_s390_fac_list_mask_size(); i++)
- kvm_s390_fac_list_mask[i] &= kvm->arch.model.fac->kvm[i];
-
- /*
- * Apply the kvm facility mask to limit the kvm supported/tolerated
- * facility list.
- */
+ /* Populate the facility mask initially. */
+ memcpy(kvm->arch.model.fac->mask, S390_lowcore.stfle_fac_list,
+ S390_ARCH_FAC_LIST_SIZE_BYTE);
for (i = 0; i < S390_ARCH_FAC_LIST_SIZE_U64; i++) {
if (i < kvm_s390_fac_list_mask_size())
- kvm->arch.model.fac->kvm[i] &= kvm_s390_fac_list_mask[i];
+ kvm->arch.model.fac->mask[i] &= kvm_s390_fac_list_mask[i];
else
- kvm->arch.model.fac->kvm[i] = 0UL;
+ kvm->arch.model.fac->mask[i] = 0UL;
}
+ /* Populate the facility list initially. */
+ memcpy(kvm->arch.model.fac->list, kvm->arch.model.fac->mask,
+ S390_ARCH_FAC_LIST_SIZE_BYTE);
+
kvm_s390_get_cpu_id(&kvm->arch.model.cpu_id);
kvm->arch.model.ibc = sclp_get_ibc() & 0x0fff;
@@ -1165,8 +1160,6 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
mutex_lock(&vcpu->kvm->lock);
vcpu->arch.cpu_id = vcpu->kvm->arch.model.cpu_id;
- memcpy(vcpu->kvm->arch.model.fac->sie, vcpu->kvm->arch.model.fac->kvm,
- S390_ARCH_FAC_LIST_SIZE_BYTE);
vcpu->arch.sie_block->ibc = vcpu->kvm->arch.model.ibc;
mutex_unlock(&vcpu->kvm->lock);
@@ -1212,7 +1205,7 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
}
- vcpu->arch.sie_block->fac = (int) (long) kvm->arch.model.fac->sie;
+ vcpu->arch.sie_block->fac = (int) (long) kvm->arch.model.fac->list;
spin_lock_init(&vcpu->arch.local_int.lock);
vcpu->arch.local_int.float_int = &kvm->arch.float_int;
diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h
index 985c2114d7ef..c34109aa552d 100644
--- a/arch/s390/kvm/kvm-s390.h
+++ b/arch/s390/kvm/kvm-s390.h
@@ -128,7 +128,8 @@ static inline void kvm_s390_set_psw_cc(struct kvm_vcpu *vcpu, unsigned long cc)
/* test availability of facility in a kvm intance */
static inline int test_kvm_facility(struct kvm *kvm, unsigned long nr)
{
- return __test_facility(nr, kvm->arch.model.fac->kvm);
+ return __test_facility(nr, kvm->arch.model.fac->mask) &&
+ __test_facility(nr, kvm->arch.model.fac->list);
}
/* are cpu states controlled by user space */
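
Note: test_kvm_facility() now requires a facility bit to be set in both the supported mask and the guest-requested list. A hedged sketch of how an interception handler typically gates on it; the facility number and handler are illustrative, not taken from the patch:

#include <linux/kvm_host.h>
#include "kvm-s390.h"

static int handle_demo_op(struct kvm_vcpu *vcpu)
{
	/* Reject the intercepted instruction unless facility 76 is offered. */
	if (!test_kvm_facility(vcpu->kvm, 76))
		return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);

	/* ... emulate the instruction here ... */
	return 0;
}
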
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
index bdd9b5b17e03..351116939ea2 100644
--- a/arch/s390/kvm/priv.c
+++ b/arch/s390/kvm/priv.c
@@ -348,7 +348,7 @@ static int handle_stfl(struct kvm_vcpu *vcpu)
* We need to shift the lower 32 facility bits (bit 0-31) from a u64
* into a u32 memory representation. They will remain bits 0-31.
*/
- fac = *vcpu->kvm->arch.model.fac->sie >> 32;
+ fac = *vcpu->kvm->arch.model.fac->list >> 32;
rc = write_guest_lc(vcpu, offsetof(struct _lowcore, stfl_fac_list),
&fac, sizeof(fac));
if (rc)
diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
index d008f638b2cd..179a2c20b01f 100644
--- a/arch/s390/mm/mmap.c
+++ b/arch/s390/mm/mmap.c
@@ -183,7 +183,10 @@ unsigned long randomize_et_dyn(void)
{
unsigned long base;
- base = (STACK_TOP / 3 * 2) & (~mmap_align_mask << PAGE_SHIFT);
+ base = STACK_TOP / 3 * 2;
+ if (!is_32bit_task())
+ /* Align to 4GB */
+ base &= ~((1UL << 32) - 1);
return base + mmap_rnd();
}
diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c
index 3290f11ae1d9..f0b85443e060 100644
--- a/arch/s390/pci/pci.c
+++ b/arch/s390/pci/pci.c
@@ -259,7 +259,10 @@ void __iowrite64_copy(void __iomem *to, const void *from, size_t count)
}
/* Create a virtual mapping cookie for a PCI BAR */
-void __iomem *pci_iomap(struct pci_dev *pdev, int bar, unsigned long max)
+void __iomem *pci_iomap_range(struct pci_dev *pdev,
+ int bar,
+ unsigned long offset,
+ unsigned long max)
{
struct zpci_dev *zdev = get_zdev(pdev);
u64 addr;
@@ -270,14 +273,27 @@ void __iomem *pci_iomap(struct pci_dev *pdev, int bar, unsigned long max)
idx = zdev->bars[bar].map_idx;
spin_lock(&zpci_iomap_lock);
- zpci_iomap_start[idx].fh = zdev->fh;
- zpci_iomap_start[idx].bar = bar;
+ if (zpci_iomap_start[idx].count++) {
+ BUG_ON(zpci_iomap_start[idx].fh != zdev->fh ||
+ zpci_iomap_start[idx].bar != bar);
+ } else {
+ zpci_iomap_start[idx].fh = zdev->fh;
+ zpci_iomap_start[idx].bar = bar;
+ }
+ /* Detect overrun */
+ BUG_ON(!zpci_iomap_start[idx].count);
spin_unlock(&zpci_iomap_lock);
addr = ZPCI_IOMAP_ADDR_BASE | ((u64) idx << 48);
- return (void __iomem *) addr;
+ return (void __iomem *) addr + offset;
}
-EXPORT_SYMBOL_GPL(pci_iomap);
+EXPORT_SYMBOL(pci_iomap_range);
+
+void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
+{
+ return pci_iomap_range(dev, bar, 0, maxlen);
+}
+EXPORT_SYMBOL(pci_iomap);
void pci_iounmap(struct pci_dev *pdev, void __iomem *addr)
{
@@ -285,11 +301,15 @@ void pci_iounmap(struct pci_dev *pdev, void __iomem *addr)
idx = (((__force u64) addr) & ~ZPCI_IOMAP_ADDR_BASE) >> 48;
spin_lock(&zpci_iomap_lock);
- zpci_iomap_start[idx].fh = 0;
- zpci_iomap_start[idx].bar = 0;
+ /* Detect underrun */
+ BUG_ON(!zpci_iomap_start[idx].count);
+ if (!--zpci_iomap_start[idx].count) {
+ zpci_iomap_start[idx].fh = 0;
+ zpci_iomap_start[idx].bar = 0;
+ }
spin_unlock(&zpci_iomap_lock);
}
-EXPORT_SYMBOL_GPL(pci_iounmap);
+EXPORT_SYMBOL(pci_iounmap);
static int pci_read(struct pci_bus *bus, unsigned int devfn, int where,
int size, u32 *val)
@@ -463,9 +483,8 @@ void arch_teardown_msi_irqs(struct pci_dev *pdev)
airq_iv_free_bit(zpci_aisb_iv, zdev->aisb);
}
-static void zpci_map_resources(struct zpci_dev *zdev)
+static void zpci_map_resources(struct pci_dev *pdev)
{
- struct pci_dev *pdev = zdev->pdev;
resource_size_t len;
int i;
@@ -479,9 +498,8 @@ static void zpci_map_resources(struct zpci_dev *zdev)
}
}
-static void zpci_unmap_resources(struct zpci_dev *zdev)
+static void zpci_unmap_resources(struct pci_dev *pdev)
{
- struct pci_dev *pdev = zdev->pdev;
resource_size_t len;
int i;
@@ -631,7 +649,7 @@ int pcibios_add_device(struct pci_dev *pdev)
zdev->pdev = pdev;
pdev->dev.groups = zpci_attr_groups;
- zpci_map_resources(zdev);
+ zpci_map_resources(pdev);
for (i = 0; i < PCI_BAR_COUNT; i++) {
res = &pdev->resource[i];
@@ -643,6 +661,11 @@ int pcibios_add_device(struct pci_dev *pdev)
return 0;
}
+void pcibios_release_device(struct pci_dev *pdev)
+{
+ zpci_unmap_resources(pdev);
+}
+
int pcibios_enable_device(struct pci_dev *pdev, int mask)
{
struct zpci_dev *zdev = get_zdev(pdev);
@@ -650,7 +673,6 @@ int pcibios_enable_device(struct pci_dev *pdev, int mask)
zdev->pdev = pdev;
zpci_debug_init_device(zdev);
zpci_fmb_enable_device(zdev);
- zpci_map_resources(zdev);
return pci_enable_resources(pdev, mask);
}
@@ -659,7 +681,6 @@ void pcibios_disable_device(struct pci_dev *pdev)
{
struct zpci_dev *zdev = get_zdev(pdev);
- zpci_unmap_resources(zdev);
zpci_fmb_disable_device(zdev);
zpci_debug_exit_device(zdev);
zdev->pdev = NULL;
@@ -668,7 +689,8 @@ void pcibios_disable_device(struct pci_dev *pdev)
#ifdef CONFIG_HIBERNATE_CALLBACKS
static int zpci_restore(struct device *dev)
{
- struct zpci_dev *zdev = get_zdev(to_pci_dev(dev));
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct zpci_dev *zdev = get_zdev(pdev);
int ret = 0;
if (zdev->state != ZPCI_FN_STATE_ONLINE)
@@ -678,7 +700,7 @@ static int zpci_restore(struct device *dev)
if (ret)
goto out;
- zpci_map_resources(zdev);
+ zpci_map_resources(pdev);
zpci_register_ioat(zdev, 0, zdev->start_dma + PAGE_OFFSET,
zdev->start_dma + zdev->iommu_size - 1,
(u64) zdev->dma_table);
@@ -689,12 +711,14 @@ out:
static int zpci_freeze(struct device *dev)
{
- struct zpci_dev *zdev = get_zdev(to_pci_dev(dev));
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct zpci_dev *zdev = get_zdev(pdev);
if (zdev->state != ZPCI_FN_STATE_ONLINE)
return 0;
zpci_unregister_ioat(zdev, 0);
+ zpci_unmap_resources(pdev);
return clp_disable_fh(zdev);
}
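
Note: pci_iomap_range() lets a driver map just a window of a BAR, and the new count field makes repeated mappings of the same BAR reference counted, released on pci_iounmap(). A hedged driver-side sketch; the device, BAR and offsets are made up:

#include <linux/pci.h>
#include <linux/io.h>

static int demo_map_regs(struct pci_dev *pdev)
{
	void __iomem *regs;

	/* Map 256 bytes starting 4 KiB into BAR 0. */
	regs = pci_iomap_range(pdev, 0, 0x1000, 0x100);
	if (!regs)
		return -ENOMEM;

	writel(0x1, regs);		/* example register write */
	pci_iounmap(pdev, regs);	/* drops the iomap reference again */
	return 0;
}
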
diff --git a/arch/s390/pci/pci_mmio.c b/arch/s390/pci/pci_mmio.c
index 8aa271b3d1ad..b1bb2b72302c 100644
--- a/arch/s390/pci/pci_mmio.c
+++ b/arch/s390/pci/pci_mmio.c
@@ -64,8 +64,7 @@ SYSCALL_DEFINE3(s390_pci_mmio_write, unsigned long, mmio_addr,
if (copy_from_user(buf, user_buffer, length))
goto out;
- memcpy_toio(io_addr, buf, length);
- ret = 0;
+ ret = zpci_memcpy_toio(io_addr, buf, length);
out:
if (buf != local_buf)
kfree(buf);
@@ -98,16 +97,16 @@ SYSCALL_DEFINE3(s390_pci_mmio_read, unsigned long, mmio_addr,
goto out;
io_addr = (void __iomem *)((pfn << PAGE_SHIFT) | (mmio_addr & ~PAGE_MASK));
- ret = -EFAULT;
- if ((unsigned long) io_addr < ZPCI_IOMAP_ADDR_BASE)
+ if ((unsigned long) io_addr < ZPCI_IOMAP_ADDR_BASE) {
+ ret = -EFAULT;
goto out;
-
- memcpy_fromio(buf, io_addr, length);
-
- if (copy_to_user(user_buffer, buf, length))
+ }
+ ret = zpci_memcpy_fromio(buf, io_addr, length);
+ if (ret)
goto out;
+ if (copy_to_user(user_buffer, buf, length))
+ ret = -EFAULT;
- ret = 0;
out:
if (buf != local_buf)
kfree(buf);
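
Note: the hunks above route the s390-specific MMIO syscalls through zpci_memcpy_toio()/zpci_memcpy_fromio() so that PCI errors are reported instead of silently ignored. A hedged userspace sketch of invoking s390_pci_mmio_read() directly; the MMIO address would come from an earlier mmap() of the device resource and is purely illustrative here:

#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <asm/unistd.h>

static long pci_mmio_read(unsigned long mmio_addr, void *buf, size_t len)
{
	return syscall(__NR_s390_pci_mmio_read, mmio_addr, buf, len);
}

int main(void)
{
	unsigned long bar_addr = 0;	/* would come from mmap() of a BAR */
	unsigned int val;

	if (pci_mmio_read(bar_addr, &val, sizeof(val)) < 0) {
		perror("s390_pci_mmio_read");
		return 1;
	}
	printf("0x%x\n", val);
	return 0;
}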