author     Nicolas Pitre <nicolas.pitre@linaro.org>  2011-09-16 04:12:19 +0200
committer  Nicolas Pitre <nico@fluxnic.net>          2011-11-27 01:21:27 +0100
commit     6ee723a6570a897208b76ab3e9a495e9106b2f8c (patch)
tree       6822cd8b588c789d5e71229f290a6e7b6e32e448 /arch
parent     ARM: move iotable mappings within the vmalloc region (diff)
ARM: simplify __iounmap() when dealing with section based mapping
Firstly, there is no need to have a double pointer here as we're only walking the vmlist and not modifying it.

Secondly, for the same reason, we don't need a write lock but only a read lock here, since the lock only protects the coherency of the list, nothing else.

Lastly, the reason for holding a lock is not what the comment says, so let's remove that misleading piece of information.

Signed-off-by: Nicolas Pitre <nicolas.pitre@linaro.org>
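To illustrate the first two points, here is a minimal user-space sketch (the struct node type and the list_remove()/list_find() helpers below are hypothetical, not kernel code): unlinking an element has to rewrite a ->next link, which is why a removal loop walks with a double pointer and needs exclusive access, while a lookup such as the one in __iounmap() only reads the links and is safe under a shared read lock.

#include <stddef.h>

/* Hypothetical singly linked list standing in for vmlist. */
struct node {
	struct node *next;
	int key;
};

/*
 * Removal rewrites a ->next field, so it walks with a pointer to the
 * link being updated (a double pointer) and needs exclusive access,
 * i.e. a write lock in the vmlist case.
 */
static void list_remove(struct node **head, int key)
{
	struct node **p, *tmp;

	for (p = head; (tmp = *p); p = &tmp->next) {
		if (tmp->key == key) {
			*p = tmp->next;		/* unlink: modifies the list */
			break;
		}
	}
}

/*
 * A lookup only follows ->next and never writes it, so a plain pointer
 * is enough, and a read lock (shared access) suffices to keep the
 * traversal coherent against concurrent list updates.
 */
static struct node *list_find(struct node *head, int key)
{
	struct node *n;

	for (n = head; n; n = n->next)
		if (n->key == key)
			return n;
	return NULL;
}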
Diffstat (limited to 'arch')
-rw-r--r--  arch/arm/mm/ioremap.c  |  20
1 file changed, 9 insertions(+), 11 deletions(-)
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index bdb248c4f55c..bc7d9bd766d1 100644
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -314,26 +314,24 @@ void __iounmap(volatile void __iomem *io_addr)
 {
 	void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr);
 #ifndef CONFIG_SMP
-	struct vm_struct **p, *tmp;
+	struct vm_struct *vm;
 
 	/*
 	 * If this is a section based mapping we need to handle it
 	 * specially as the VM subsystem does not know how to handle
-	 * such a beast. We need the lock here b/c we need to clear
-	 * all the mappings before the area can be reclaimed
-	 * by someone else.
+	 * such a beast.
 	 */
-	write_lock(&vmlist_lock);
-	for (p = &vmlist ; (tmp = *p) ; p = &tmp->next) {
-		if ((tmp->flags & VM_IOREMAP) && (tmp->addr == addr)) {
-			if (tmp->flags & VM_ARM_SECTION_MAPPING) {
-				unmap_area_sections((unsigned long)tmp->addr,
-						    tmp->size);
+	read_lock(&vmlist_lock);
+	for (vm = vmlist; vm; vm = vm->next) {
+		if ((vm->flags & VM_IOREMAP) && (vm->addr == addr)) {
+			if (vm->flags & VM_ARM_SECTION_MAPPING) {
+				unmap_area_sections((unsigned long)vm->addr,
+						    vm->size);
 			}
 			break;
 		}
 	}
-	write_unlock(&vmlist_lock);
+	read_unlock(&vmlist_lock);
 #endif
 
 	vunmap(addr);