author     James Morris <jmorris@namei.org>  2009-02-06 01:01:45 +0100
committer  James Morris <jmorris@namei.org>  2009-02-06 01:01:45 +0100
commit     cb5629b10d64a8006622ce3a52bc887d91057d69
tree       7c06d8f30783115e3384721046258ce615b129c5
parent     TPM: integrity fix
parent     seq_file: fix big-enough lseek() + read()
Merge branch 'master' into next
Conflicts:
fs/namei.c
Manually merged per:
diff --cc fs/namei.c
index 734f2b5,bbc15c2..0000000
--- a/fs/namei.c
+++ b/fs/namei.c
@@@ -860,9 -848,8 +849,10 @@@ static int __link_path_walk(const char
  		nd->flags |= LOOKUP_CONTINUE;
  		err = exec_permission_lite(inode);
  		if (err == -EAGAIN)
- 			err = vfs_permission(nd, MAY_EXEC);
+ 			err = inode_permission(nd->path.dentry->d_inode,
+ 					       MAY_EXEC);
 +		if (!err)
 +			err = ima_path_check(&nd->path, MAY_EXEC);
  		if (err)
  			break;
@@@ -1525,14 -1506,9 +1509,14 @@@ int may_open(struct path *path, int acc
  			flag &= ~O_TRUNC;
  	}
  
- 	error = vfs_permission(nd, acc_mode);
+ 	error = inode_permission(inode, acc_mode);
  	if (error)
  		return error;
 +
- 	error = ima_path_check(&nd->path,
++	error = ima_path_check(path,
 +			       acc_mode & (MAY_READ | MAY_WRITE | MAY_EXEC));
 +	if (error)
 +		return error;
  	/*
  	 * An append-only file must be opened in append mode for writing.
  	 */
Signed-off-by: James Morris <jmorris@namei.org>
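
The net effect of the resolution in may_open() is an ordering rule: the ordinary permission check runs first, and only on success does the IMA integrity check get a chance to veto the open. Below is a minimal standalone C model of that ordering; the stub functions and MAY_* values are illustrative assumptions, not the kernel's definitions.

    #include <stdio.h>

    /* illustrative stand-ins for kernel MAY_* flags (values assumed) */
    #define MAY_EXEC  1
    #define MAY_WRITE 2
    #define MAY_READ  4

    /* stubs modelling inode_permission() and ima_path_check(): 0 = allowed */
    static int inode_permission_stub(int acc_mode) { (void)acc_mode; return 0; }
    static int ima_path_check_stub(int acc_mode)   { (void)acc_mode; return 0; }

    static int may_open_model(int acc_mode)
    {
        int error;

        error = inode_permission_stub(acc_mode);    /* DAC/LSM check first */
        if (error)
            return error;

        /* IMA only sees the read/write/exec bits, as in the merged hunk */
        error = ima_path_check_stub(acc_mode &
                                    (MAY_READ | MAY_WRITE | MAY_EXEC));
        if (error)
            return error;

        return 0;                                   /* open may proceed */
    }

    int main(void)
    {
        printf("may_open -> %d\n", may_open_model(MAY_READ));
        return 0;
    }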
Diffstat (limited to 'arch/ia64/kernel')

 arch/ia64/kernel/acpi.c      |  4
 arch/ia64/kernel/entry.S     |  2
 arch/ia64/kernel/init_task.c |  1
 arch/ia64/kernel/iosapic.c   | 35
 arch/ia64/kernel/irq.c       | 26
 arch/ia64/kernel/kprobes.c   | 12
 arch/ia64/kernel/msi_ia64.c  | 12
 arch/ia64/kernel/pci-dma.c   |  2
 arch/ia64/kernel/smpboot.c   | 10
 arch/ia64/kernel/sys_ia64.c  |  2
 arch/ia64/kernel/time.c      | 18
 arch/ia64/kernel/topology.c  |  2
 arch/ia64/kernel/unaligned.c |  6
 13 files changed, 64 insertions(+), 68 deletions(-)
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
index bd7acc71e8a9..d541671caf4a 100644
--- a/arch/ia64/kernel/acpi.c
+++ b/arch/ia64/kernel/acpi.c
@@ -65,6 +65,7 @@ EXPORT_SYMBOL(pm_idle);
 void (*pm_power_off) (void);
 EXPORT_SYMBOL(pm_power_off);
 
+u32 acpi_rsdt_forced;
 unsigned int acpi_cpei_override;
 unsigned int acpi_cpei_phys_cpuid;
 
@@ -202,7 +203,6 @@ char *__init __acpi_map_table(unsigned long phys_addr, unsigned long size)
                              Boot-time Table Parsing
    -------------------------------------------------------------------------- */
 
-static int total_cpus __initdata;
 static int available_cpus __initdata;
 struct acpi_table_madt *acpi_madt __initdata;
 static u8 has_8259;
@@ -1001,7 +1001,7 @@ acpi_map_iosapic(acpi_handle handle, u32 depth, void *context, void **ret)
 	node = pxm_to_node(pxm);
 
 	if (node >= MAX_NUMNODES || !node_online(node) ||
-	    cpus_empty(node_to_cpumask(node)))
+	    cpumask_empty(cpumask_of_node(node)))
 		return AE_OK;
 
 	/* We know a gsi to node mapping! */
diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S
index d435f4a7a96c..e5341e2c1175 100644
--- a/arch/ia64/kernel/entry.S
+++ b/arch/ia64/kernel/entry.S
@@ -1442,7 +1442,7 @@ sys_call_table:
 	data8 sys_mkdir				// 1055
 	data8 sys_rmdir
 	data8 sys_dup
-	data8 sys_pipe
+	data8 sys_ia64_pipe
 	data8 sys_times
 	data8 ia64_brk				// 1060
 	data8 sys_setgid
diff --git a/arch/ia64/kernel/init_task.c b/arch/ia64/kernel/init_task.c
index 9d7e1c66faf4..5b0e830c6f33 100644
--- a/arch/ia64/kernel/init_task.c
+++ b/arch/ia64/kernel/init_task.c
@@ -17,7 +17,6 @@
 #include <asm/uaccess.h>
 #include <asm/pgtable.h>
 
-static struct fs_struct init_fs = INIT_FS;
 static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
 static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
 struct mm_struct init_mm = INIT_MM(init_mm);
diff --git a/arch/ia64/kernel/iosapic.c b/arch/ia64/kernel/iosapic.c
index 5c4674ae8aea..5cfd3d91001a 100644
--- a/arch/ia64/kernel/iosapic.c
+++ b/arch/ia64/kernel/iosapic.c
@@ -330,25 +330,25 @@ unmask_irq (unsigned int irq)
 
 
 static void
-iosapic_set_affinity (unsigned int irq, cpumask_t mask)
+iosapic_set_affinity(unsigned int irq, const struct cpumask *mask)
 {
 #ifdef CONFIG_SMP
 	u32 high32, low32;
-	int dest, rte_index;
+	int cpu, dest, rte_index;
 	int redir = (irq & IA64_IRQ_REDIRECTED) ? 1 : 0;
 	struct iosapic_rte_info *rte;
 	struct iosapic *iosapic;
 
 	irq &= (~IA64_IRQ_REDIRECTED);
 
-	cpus_and(mask, mask, cpu_online_map);
-	if (cpus_empty(mask))
+	cpu = cpumask_first_and(cpu_online_mask, mask);
+	if (cpu >= nr_cpu_ids)
 		return;
 
-	if (irq_prepare_move(irq, first_cpu(mask)))
+	if (irq_prepare_move(irq, cpu))
 		return;
 
-	dest = cpu_physical_id(first_cpu(mask));
+	dest = cpu_physical_id(cpu);
 
 	if (!iosapic_intr_info[irq].count)
 		return;			/* not an IOSAPIC interrupt */
@@ -695,32 +695,31 @@ get_target_cpu (unsigned int gsi, int irq)
 #ifdef CONFIG_NUMA
 	{
 		int num_cpus, cpu_index, iosapic_index, numa_cpu, i = 0;
-		cpumask_t cpu_mask;
+		const struct cpumask *cpu_mask;
 
 		iosapic_index = find_iosapic(gsi);
 		if (iosapic_index < 0 ||
 		    iosapic_lists[iosapic_index].node == MAX_NUMNODES)
 			goto skip_numa_setup;
 
-		cpu_mask = node_to_cpumask(iosapic_lists[iosapic_index].node);
-		cpus_and(cpu_mask, cpu_mask, domain);
-		for_each_cpu_mask(numa_cpu, cpu_mask) {
-			if (!cpu_online(numa_cpu))
-				cpu_clear(numa_cpu, cpu_mask);
+		cpu_mask = cpumask_of_node(iosapic_lists[iosapic_index].node);
+		num_cpus = 0;
+		for_each_cpu_and(numa_cpu, cpu_mask, &domain) {
+			if (cpu_online(numa_cpu))
+				num_cpus++;
 		}
 
-		num_cpus = cpus_weight(cpu_mask);
-
 		if (!num_cpus)
 			goto skip_numa_setup;
 
 		/* Use irq assignment to distribute across cpus in node */
 		cpu_index = irq % num_cpus;
 
-		for (numa_cpu = first_cpu(cpu_mask) ; i < cpu_index ; i++)
-			numa_cpu = next_cpu(numa_cpu, cpu_mask);
+		for_each_cpu_and(numa_cpu, cpu_mask, &domain)
+			if (cpu_online(numa_cpu) && i++ >= cpu_index)
+				break;
 
-		if (numa_cpu != NR_CPUS)
+		if (numa_cpu < nr_cpu_ids)
 			return cpu_physical_id(numa_cpu);
 	}
 skip_numa_setup:
@@ -731,7 +730,7 @@ skip_numa_setup:
 	 * case of NUMA.)
 	 */
 	do {
-		if (++cpu >= NR_CPUS)
+		if (++cpu >= nr_cpu_ids)
 			cpu = 0;
 
 	} while (!cpu_online(cpu) || !cpu_isset(cpu, domain));
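
The iosapic.c conversion above is the pattern repeated throughout this merge: rather than copying a cpumask_t by value and mutating it (cpus_and, then first_cpu), the new code reads the mask through a const pointer and asks a single question of the intersection. A standalone sketch of that pattern, with a plain unsigned int standing in for struct cpumask and all names illustrative:

    #include <stdio.h>

    #define NR_CPU_IDS 8
    static unsigned int cpu_online_mask_model = 0x0b;  /* CPUs 0, 1, 3 online */

    /* analogue of cpumask_first_and(): lowest set bit of a & b, else NR_CPU_IDS */
    static int first_and(unsigned int a, unsigned int b)
    {
        for (int cpu = 0; cpu < NR_CPU_IDS; cpu++)
            if ((a & b) & (1u << cpu))
                return cpu;
        return NR_CPU_IDS;
    }

    /* mirrors the new iosapic_set_affinity(): mask arrives as a const pointer */
    static void set_affinity_model(const unsigned int *mask)
    {
        int cpu = first_and(cpu_online_mask_model, *mask);

        if (cpu >= NR_CPU_IDS)
            return;                     /* no requested CPU is online: bail out */
        printf("route IRQ to cpu %d\n", cpu);
    }

    int main(void)
    {
        unsigned int requested = 0x0c;  /* CPUs 2 and 3 */
        set_affinity_model(&requested); /* prints cpu 3: first online & requested */
        return 0;
    }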
diff --git a/arch/ia64/kernel/irq.c b/arch/ia64/kernel/irq.c
index 7fd18f54c056..a58f64ca9f0e 100644
--- a/arch/ia64/kernel/irq.c
+++ b/arch/ia64/kernel/irq.c
@@ -102,21 +102,18 @@ static char irq_redir [NR_IRQS]; // = { [0 ... NR_IRQS-1] = 1 };
 
 void set_irq_affinity_info (unsigned int irq, int hwid, int redir)
 {
-	cpumask_t mask = CPU_MASK_NONE;
-
-	cpu_set(cpu_logical_id(hwid), mask);
-
 	if (irq < NR_IRQS) {
-		irq_desc[irq].affinity = mask;
+		cpumask_copy(&irq_desc[irq].affinity,
+			     cpumask_of(cpu_logical_id(hwid)));
 		irq_redir[irq] = (char) (redir & 0xff);
 	}
 }
 
-bool is_affinity_mask_valid(cpumask_t cpumask)
+bool is_affinity_mask_valid(const struct cpumask *cpumask)
 {
 	if (ia64_platform_is("sn2")) {
 		/* Only allow one CPU to be specified in the smp_affinity mask */
-		if (cpus_weight(cpumask) != 1)
+		if (cpumask_weight(cpumask) != 1)
 			return false;
 	}
 	return true;
@@ -128,12 +125,11 @@ bool is_affinity_mask_valid(cpumask_t cpumask)
 
 unsigned int vectors_in_migration[NR_IRQS];
 
 /*
- * Since cpu_online_map is already updated, we just need to check for
+ * Since cpu_online_mask is already updated, we just need to check for
  * affinity that has zeros
  */
 static void migrate_irqs(void)
 {
-	cpumask_t	mask;
 	irq_desc_t *desc;
 	int 		irq, new_cpu;
 
@@ -152,15 +148,14 @@ static void migrate_irqs(void)
 		if (desc->status == IRQ_PER_CPU)
 			continue;
 
-		cpus_and(mask, irq_desc[irq].affinity, cpu_online_map);
-		if (any_online_cpu(mask) == NR_CPUS) {
+		if (cpumask_any_and(&irq_desc[irq].affinity, cpu_online_mask)
+		    >= nr_cpu_ids) {
 			/*
 			 * Save it for phase 2 processing
 			 */
 			vectors_in_migration[irq] = irq;
 
-			new_cpu = any_online_cpu(cpu_online_map);
-			mask = cpumask_of_cpu(new_cpu);
+			new_cpu = cpumask_any(cpu_online_mask);
 
 			/*
 			 * Al three are essential, currently WARN_ON.. maybe panic?
@@ -168,7 +163,8 @@ static void migrate_irqs(void)
 			if (desc->chip && desc->chip->disable &&
 				desc->chip->enable && desc->chip->set_affinity) {
 				desc->chip->disable(irq);
-				desc->chip->set_affinity(irq, mask);
+				desc->chip->set_affinity(irq,
+							 cpumask_of(new_cpu));
 				desc->chip->enable(irq);
 			} else {
 				WARN_ON((!(desc->chip) || !(desc->chip->disable) ||
@@ -192,7 +188,7 @@ void fixup_irqs(void)
 	 * Find a new timesync master
 	 */
 	if (smp_processor_id() == time_keeper_id) {
-		time_keeper_id = first_cpu(cpu_online_map);
+		time_keeper_id = cpumask_first(cpu_online_mask);
 		printk ("CPU %d is now promoted to time-keeper master\n", time_keeper_id);
 	}
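
migrate_irqs() above applies the same idea to a different question: an IRQ needs retargeting exactly when its affinity mask no longer intersects the online mask, which the new code tests as cpumask_any_and(...) >= nr_cpu_ids instead of materializing a temporary mask. A toy model under the same bitmask stand-in as the previous sketch:

    #include <stdio.h>

    #define NR_CPU_IDS 8

    /* analogue of cpumask_any_and(): any set bit of a & b, else NR_CPU_IDS */
    static int any_and(unsigned int a, unsigned int b)
    {
        for (int cpu = 0; cpu < NR_CPU_IDS; cpu++)
            if ((a & b) & (1u << cpu))
                return cpu;
        return NR_CPU_IDS;
    }

    int main(void)
    {
        unsigned int online   = 0x03;   /* CPUs 0-1 survived the hotplug */
        unsigned int affinity = 0x0c;   /* IRQ was bound to CPUs 2-3 */

        if (any_and(affinity, online) >= NR_CPU_IDS) {
            int new_cpu = any_and(online, ~0u);  /* cpumask_any() analogue */
            printf("affinity went empty: retarget IRQ to cpu %d\n", new_cpu);
        }
        return 0;
    }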
diff --git a/arch/ia64/kernel/kprobes.c b/arch/ia64/kernel/kprobes.c
index f07688da947c..9adac441ac9b 100644
--- a/arch/ia64/kernel/kprobes.c
+++ b/arch/ia64/kernel/kprobes.c
@@ -434,7 +434,7 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
 	/*
 	 * It is possible to have multiple instances associated with a given
 	 * task either because an multiple functions in the call path
-	 * have a return probe installed on them, and/or more then one return
+	 * have a return probe installed on them, and/or more than one return
 	 * return probe was registered for a target function.
 	 *
 	 * We can handle this because:
@@ -670,9 +670,11 @@ void __kprobes arch_disarm_kprobe(struct kprobe *p)
 
 void __kprobes arch_remove_kprobe(struct kprobe *p)
 {
-	mutex_lock(&kprobe_mutex);
-	free_insn_slot(p->ainsn.insn, p->ainsn.inst_flag & INST_FLAG_BOOSTABLE);
-	mutex_unlock(&kprobe_mutex);
+	if (p->ainsn.insn) {
+		free_insn_slot(p->ainsn.insn,
+			       p->ainsn.inst_flag & INST_FLAG_BOOSTABLE);
+		p->ainsn.insn = NULL;
+	}
 }
 /*
  * We are resuming execution after a single step fault, so the pt_regs
@@ -868,7 +870,7 @@ static int __kprobes pre_kprobes_handler(struct die_args *args)
 		return 1;
 
 ss_probe:
-#if !defined(CONFIG_PREEMPT) || defined(CONFIG_PM)
+#if !defined(CONFIG_PREEMPT) || defined(CONFIG_FREEZER)
 	if (p->ainsn.inst_flag == INST_FLAG_BOOSTABLE && !p->post_handler) {
 		/* Boost up -- we can execute copied instructions directly */
 		ia64_psr(regs)->ri = p->ainsn.slot;
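
The arch_remove_kprobe() hunk above replaces a mutex-protected unconditional free with a guarded test-and-clear of p->ainsn.insn. The locking rationale lives in the wider kprobes rework and is not visible here, but the local effect is clear: removal becomes a no-op when no slot was ever allocated, and repeating it cannot double-free. A standalone sketch of that pattern:

    #include <stdio.h>
    #include <stdlib.h>

    struct probe { void *insn_slot; };

    static void remove_probe(struct probe *p)
    {
        if (p->insn_slot) {         /* nothing to do if never armed */
            free(p->insn_slot);
            p->insn_slot = NULL;    /* a second call becomes a no-op */
        }
    }

    int main(void)
    {
        struct probe p = { .insn_slot = malloc(16) };

        remove_probe(&p);
        remove_probe(&p);           /* safe: no double free */
        printf("slot = %p\n", p.insn_slot);
        return 0;
    }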
diff --git a/arch/ia64/kernel/msi_ia64.c b/arch/ia64/kernel/msi_ia64.c
index 702a09c13238..890339339035 100644
--- a/arch/ia64/kernel/msi_ia64.c
+++ b/arch/ia64/kernel/msi_ia64.c
@@ -49,11 +49,12 @@ static struct irq_chip ia64_msi_chip;
 
 #ifdef CONFIG_SMP
-static void ia64_set_msi_irq_affinity(unsigned int irq, cpumask_t cpu_mask)
+static void ia64_set_msi_irq_affinity(unsigned int irq,
+				      const cpumask_t *cpu_mask)
 {
 	struct msi_msg msg;
 	u32 addr, data;
-	int cpu = first_cpu(cpu_mask);
+	int cpu = first_cpu(*cpu_mask);
 
 	if (!cpu_online(cpu))
 		return;
@@ -166,12 +167,11 @@ void arch_teardown_msi_irq(unsigned int irq)
 
 #ifdef CONFIG_DMAR
 #ifdef CONFIG_SMP
-static void dmar_msi_set_affinity(unsigned int irq, cpumask_t mask)
+static void dmar_msi_set_affinity(unsigned int irq, const struct cpumask *mask)
 {
 	struct irq_cfg *cfg = irq_cfg + irq;
 	struct msi_msg msg;
-	int cpu = first_cpu(mask);
-
+	int cpu = cpumask_first(mask);
 
 	if (!cpu_online(cpu))
 		return;
@@ -187,7 +187,7 @@ static void dmar_msi_set_affinity(unsigned int irq, cpumask_t mask)
 	msg.address_lo |= MSI_ADDR_DESTID_CPU(cpu_physical_id(cpu));
 
 	dmar_msi_write(irq, &msg);
-	irq_desc[irq].affinity = mask;
+	irq_desc[irq].affinity = *mask;
 }
 #endif /* CONFIG_SMP */
diff --git a/arch/ia64/kernel/pci-dma.c b/arch/ia64/kernel/pci-dma.c
index 2a92f637431d..d0ada067a4af 100644
--- a/arch/ia64/kernel/pci-dma.c
+++ b/arch/ia64/kernel/pci-dma.c
@@ -39,7 +39,7 @@ int iommu_detected __read_mostly;
    be probably a smaller DMA mask, but this is bug-to-bug compatible
    to i386. */
 struct device fallback_dev = {
-	.bus_id = "fallback device",
+	.init_name = "fallback device",
 	.coherent_dma_mask = DMA_32BIT_MASK,
 	.dma_mask = &fallback_dev.coherent_dma_mask,
 };
diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c
index 1dcbb85fc4ee..11463994a7d5 100644
--- a/arch/ia64/kernel/smpboot.c
+++ b/arch/ia64/kernel/smpboot.c
@@ -131,12 +131,6 @@ struct task_struct *task_for_booting_cpu;
  */
 DEFINE_PER_CPU(int, cpu_state);
 
-/* Bitmasks of currently online, and possible CPUs */
-cpumask_t cpu_online_map;
-EXPORT_SYMBOL(cpu_online_map);
-cpumask_t cpu_possible_map = CPU_MASK_NONE;
-EXPORT_SYMBOL(cpu_possible_map);
-
 cpumask_t cpu_core_map[NR_CPUS] __cacheline_aligned;
 EXPORT_SYMBOL(cpu_core_map);
 DEFINE_PER_CPU_SHARED_ALIGNED(cpumask_t, cpu_sibling_map);
@@ -688,7 +682,7 @@ int migrate_platform_irqs(unsigned int cpu)
 {
 	int new_cpei_cpu;
 	irq_desc_t *desc = NULL;
-	cpumask_t 	mask;
+	const struct cpumask *mask;
 	int 		retval = 0;
 
 	/*
@@ -701,7 +695,7 @@ int migrate_platform_irqs(unsigned int cpu)
 			 * Now re-target the CPEI to a different processor
 			 */
 			new_cpei_cpu = any_online_cpu(cpu_online_map);
-			mask = cpumask_of_cpu(new_cpei_cpu);
+			mask = cpumask_of(new_cpei_cpu);
 			set_cpei_target_cpu(new_cpei_cpu);
 			desc = irq_desc + ia64_cpe_irq;
 			/*
diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c
index bcbb6d8792d3..92ed83f34036 100644
--- a/arch/ia64/kernel/sys_ia64.c
+++ b/arch/ia64/kernel/sys_ia64.c
@@ -154,7 +154,7 @@ out:
  * and r9) as this is faster than doing a copy_to_user().
  */
 asmlinkage long
-sys_pipe (void)
+sys_ia64_pipe (void)
 {
 	struct pt_regs *regs = task_pt_regs(current);
 	int fd[2];
diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c
index 65c10a42c88f..f0ebb342409d 100644
--- a/arch/ia64/kernel/time.c
+++ b/arch/ia64/kernel/time.c
@@ -93,13 +93,14 @@ void ia64_account_on_switch(struct task_struct *prev, struct task_struct *next)
 	now = ia64_get_itc();
 
 	delta_stime = cycle_to_cputime(pi->ac_stime + (now - pi->ac_stamp));
-	account_system_time(prev, 0, delta_stime);
-	account_system_time_scaled(prev, delta_stime);
+	if (idle_task(smp_processor_id()) != prev)
+		account_system_time(prev, 0, delta_stime, delta_stime);
+	else
+		account_idle_time(delta_stime);
 
 	if (pi->ac_utime) {
 		delta_utime = cycle_to_cputime(pi->ac_utime);
-		account_user_time(prev, delta_utime);
-		account_user_time_scaled(prev, delta_utime);
+		account_user_time(prev, delta_utime, delta_utime);
 	}
 
 	pi->ac_stamp = ni->ac_stamp = now;
@@ -122,8 +123,10 @@ void account_system_vtime(struct task_struct *tsk)
 	now = ia64_get_itc();
 
 	delta_stime = cycle_to_cputime(ti->ac_stime + (now - ti->ac_stamp));
-	account_system_time(tsk, 0, delta_stime);
-	account_system_time_scaled(tsk, delta_stime);
+	if (irq_count() || idle_task(smp_processor_id()) != tsk)
+		account_system_time(tsk, 0, delta_stime, delta_stime);
+	else
+		account_idle_time(delta_stime);
 	ti->ac_stime = 0;
 
 	ti->ac_stamp = now;
@@ -143,8 +146,7 @@ void account_process_tick(struct task_struct *p, int user_tick)
 
 	if (ti->ac_utime) {
 		delta_utime = cycle_to_cputime(ti->ac_utime);
-		account_user_time(p, delta_utime);
-		account_user_time_scaled(p, delta_utime);
+		account_user_time(p, delta_utime, delta_utime);
 		ti->ac_utime = 0;
 	}
 }
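
The three time.c hunks above all encode one decision: cycles that used to be charged unconditionally as system time are now routed to idle time when the accounted task is the idle task (and, in the vtime path, no interrupt is in flight). A standalone model of that branch; the names and bookkeeping are illustrative:

    #include <stdio.h>
    #include <stdbool.h>

    static long long system_time, idle_time;

    /* mirrors: if (irq_count() || idle_task(cpu) != tsk) system; else idle */
    static void account_delta(bool is_idle_task, bool in_irq, long long delta)
    {
        if (in_irq || !is_idle_task)
            system_time += delta;
        else
            idle_time += delta;
    }

    int main(void)
    {
        account_delta(false, false, 100);  /* ordinary task: system time */
        account_delta(true,  false, 250);  /* idle loop: idle time */
        account_delta(true,  true,  40);   /* IRQ hit the idle task: system */
        printf("system=%lld idle=%lld\n", system_time, idle_time);
        return 0;
    }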
diff --git a/arch/ia64/kernel/topology.c b/arch/ia64/kernel/topology.c
index c75b914f2d6b..a8d61a3e9a94 100644
--- a/arch/ia64/kernel/topology.c
+++ b/arch/ia64/kernel/topology.c
@@ -219,7 +219,7 @@ static ssize_t show_shared_cpu_map(struct cache_info *this_leaf, char *buf)
 	cpumask_t shared_cpu_map;
 
 	cpus_and(shared_cpu_map, this_leaf->shared_cpu_map, cpu_online_map);
-	len = cpumask_scnprintf(buf, NR_CPUS+1, shared_cpu_map);
+	len = cpumask_scnprintf(buf, NR_CPUS+1, &shared_cpu_map);
 	len += sprintf(buf+len, "\n");
 	return len;
 }
diff --git a/arch/ia64/kernel/unaligned.c b/arch/ia64/kernel/unaligned.c
index ff0e7c10faa7..6db08599ebbc 100644
--- a/arch/ia64/kernel/unaligned.c
+++ b/arch/ia64/kernel/unaligned.c
@@ -59,6 +59,7 @@ dump (const char *str, void *vp, size_t len)
  * (i.e. don't allow attacker to fill up logs with unaligned accesses).
  */
 int no_unaligned_warning;
+int unaligned_dump_stack;
 static int noprint_warning;
 
 /*
@@ -1371,9 +1372,12 @@ ia64_handle_unaligned (unsigned long ifa, struct pt_regs *regs)
 			}
 		}
 	} else {
-		if (within_logging_rate_limit())
+		if (within_logging_rate_limit()) {
 			printk(KERN_WARNING "kernel unaligned access to 0x%016lx, ip=0x%016lx\n",
 			       ifa, regs->cr_iip + ipsr->ri);
+			if (unaligned_dump_stack)
+				dump_stack();
+		}
 		set_fs(KERNEL_DS);
 	}
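
The unaligned.c change above nests the optional stack dump inside the existing rate limit, so turning on unaligned_dump_stack can never flood the log faster than the warning itself. A standalone sketch with the kernel calls stubbed out:

    #include <stdio.h>
    #include <stdbool.h>

    static int unaligned_dump_stack;       /* off by default, as in the patch */

    static bool within_rate_limit(void) { return true; }  /* stub */
    static void dump_stack_stub(void)   { puts("  ...backtrace..."); }

    static void report_unaligned(unsigned long ifa, unsigned long ip)
    {
        if (within_rate_limit()) {
            printf("kernel unaligned access to 0x%016lx, ip=0x%016lx\n", ifa, ip);
            if (unaligned_dump_stack)      /* dump shares the rate limit */
                dump_stack_stub();
        }
    }

    int main(void)
    {
        unaligned_dump_stack = 1;
        report_unaligned(0xdeadbeefUL, 0xa000000100001234UL);
        return 0;
    }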