author		Ohad Ben-Cohen <ohad@wizery.com>	2011-08-15 22:21:41 +0200
committer	Joerg Roedel <joerg.roedel@amd.com>	2011-08-26 11:46:00 +0200
commit		fcf3a6ef4a588c9f06ad7b01c83534ab81985a3f (patch)
tree		d73b98dda1ad4def8eb2f4cc012eb931ef881e1b /drivers
parent		omap: iommu: migrate to the generic IOMMU API (diff)
omap: iommu/iovmm: move to dedicated iommu folder
Move OMAP's iommu drivers to the dedicated iommu drivers folder.
While OMAP's iovmm (virtual memory manager) driver does not strictly
belong to the iommu drivers folder, move it there as well, because
it is by no means OMAP-specific in concept (technically, it is still
coupled with OMAP's iommu).
Eventually, iovmm will be completely replaced with the generic,
iommu-based, dma-mapping API.
Signed-off-by: Ohad Ben-Cohen <ohad@wizery.com>
Acked-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
Acked-by: Hiroshi DOYU <Hiroshi.DOYU@nokia.com>
Acked-by: Tony Lindgren <tony@atomide.com>
Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/iommu/Kconfig	18
-rw-r--r--	drivers/iommu/Makefile	3
-rw-r--r--	drivers/iommu/omap-iommu-debug.c	418
-rw-r--r--	drivers/iommu/omap-iommu.c	1326
-rw-r--r--	drivers/iommu/omap-iovmm.c	923
-rw-r--r--	drivers/media/video/Kconfig	2
6 files changed, 2689 insertions(+), 1 deletion(-)
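For orientation before the diff: the parent commit moved omap-iommu onto the generic IOMMU API, and the omap_iommu_ops registered below plug into it. A minimal consumer sketch, not part of this patch (the device handle, device address and prot value are illustrative, and the era-appropriate generic calls are assumed from <linux/iommu.h>):

#include <linux/iommu.h>
#include <linux/err.h>

static int demo_map_one_page(struct iommu_domain **out, struct device *dev,
			     phys_addr_t pa)
{
	struct iommu_domain *domain;
	int err;

	domain = iommu_domain_alloc();		/* -> omap_iommu_domain_init() */
	if (!domain)
		return -ENOMEM;

	err = iommu_attach_device(domain, dev);	/* -> omap_iommu_attach_dev() */
	if (err)
		goto err_free;

	/* one 4KB page (order 0) at device address 0x10000000 */
	err = iommu_map(domain, 0x10000000, pa, 0, 0);
	if (err)
		goto err_detach;

	*out = domain;
	return 0;

err_detach:
	iommu_detach_device(domain, dev);
err_free:
	iommu_domain_free(domain);
	return err;
}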
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index b57b3fa492f3..432463b2e78d 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -107,4 +107,22 @@ config INTR_REMAP
 	  To use x2apic mode in the CPU's which support x2APIC enhancements or
 	  to support platforms with CPU's having > 8 bit APIC ID, say Y.
 
+# OMAP IOMMU support
+config OMAP_IOMMU
+	bool "OMAP IOMMU Support"
+	select IOMMU_API
+
+config OMAP_IOVMM
+	tristate
+	select OMAP_IOMMU
+
+config OMAP_IOMMU_DEBUG
+	tristate "Export OMAP IOMMU/IOVMM internals in DebugFS"
+	depends on OMAP_IOVMM && DEBUG_FS
+	help
+	  Select this to see extensive information about
+	  the internal state of OMAP IOMMU/IOVMM in debugfs.
+
+	  Say N unless you know you need this.
+
 endif # IOMMU_SUPPORT
diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile
index 4d4d77df7cac..f798cdd3699e 100644
--- a/drivers/iommu/Makefile
+++ b/drivers/iommu/Makefile
@@ -3,3 +3,6 @@ obj-$(CONFIG_MSM_IOMMU) += msm_iommu.o msm_iommu_dev.o
 obj-$(CONFIG_AMD_IOMMU) += amd_iommu.o amd_iommu_init.o
 obj-$(CONFIG_DMAR) += dmar.o iova.o intel-iommu.o
 obj-$(CONFIG_INTR_REMAP) += dmar.o intr_remapping.o
+obj-$(CONFIG_OMAP_IOMMU) += omap-iommu.o
+obj-$(CONFIG_OMAP_IOVMM) += omap-iovmm.o
+obj-$(CONFIG_OMAP_IOMMU_DEBUG) += omap-iommu-debug.o
diff --git a/drivers/iommu/omap-iommu-debug.c b/drivers/iommu/omap-iommu-debug.c
new file mode 100644
index 000000000000..0f8c8dd55018
--- /dev/null
+++ b/drivers/iommu/omap-iommu-debug.c
@@ -0,0 +1,418 @@
+/*
+ * omap iommu: debugfs interface
+ *
+ * Copyright (C) 2008-2009 Nokia Corporation
+ *
+ * Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/platform_device.h>
+#include <linux/debugfs.h>
+
+#include <plat/iommu.h>
+#include <plat/iovmm.h>
+
+#include <plat/iopgtable.h>
+
+#define MAXCOLUMN 100 /* for short messages */
+
+static DEFINE_MUTEX(iommu_debug_lock);
+
+static struct dentry *iommu_debug_root;
+
+static ssize_t debug_read_ver(struct file *file, char __user *userbuf,
+			      size_t count, loff_t *ppos)
+{
+	u32 ver = iommu_arch_version();
+	char buf[MAXCOLUMN], *p = buf;
+
+	p += sprintf(p, "H/W version: %d.%d\n", (ver >> 4) & 0xf, ver & 0xf);
+
+	return simple_read_from_buffer(userbuf, count, ppos, buf, p - buf);
+}
+
+static ssize_t debug_read_regs(struct file *file, char __user *userbuf,
+			       size_t count, loff_t *ppos)
+{
+	struct iommu *obj = file->private_data;
+	char *p, *buf;
+	ssize_t bytes;
+
+	buf = kmalloc(count, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+	p = buf;
+
+	mutex_lock(&iommu_debug_lock);
+
+	bytes = iommu_dump_ctx(obj, p, count);
+	bytes = simple_read_from_buffer(userbuf, count, ppos, buf, bytes);
+
+	mutex_unlock(&iommu_debug_lock);
+	kfree(buf);
+
+	return bytes;
+}
+
+static ssize_t debug_read_tlb(struct file *file, char __user *userbuf,
+			      size_t count, loff_t *ppos)
+{
+	struct iommu *obj = file->private_data;
+	char *p, *buf;
+	ssize_t bytes, rest;
+
+	buf = kmalloc(count, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+	p = buf;
+
+	mutex_lock(&iommu_debug_lock);
+
+	p += sprintf(p, "%8s %8s\n", "cam:", "ram:");
+	p += sprintf(p, "-----------------------------------------\n");
+	rest = count - (p - buf);
+	p += dump_tlb_entries(obj, p, rest);
+
+	bytes = simple_read_from_buffer(userbuf, count, ppos, buf, p - buf);
+
+	mutex_unlock(&iommu_debug_lock);
+	kfree(buf);
+
+	return bytes;
+}
+
+static ssize_t debug_write_pagetable(struct file *file,
+		     const char __user *userbuf, size_t count, loff_t *ppos)
+{
+	struct iotlb_entry e;
+	struct cr_regs cr;
+	int err;
+	struct iommu *obj = file->private_data;
+	char buf[MAXCOLUMN], *p = buf;
+
+	count = min(count, sizeof(buf));
+
+	mutex_lock(&iommu_debug_lock);
+	if (copy_from_user(p, userbuf, count)) {
+		mutex_unlock(&iommu_debug_lock);
+		return -EFAULT;
+	}
+
+	sscanf(p, "%x %x", &cr.cam, &cr.ram);
+	if (!cr.cam || !cr.ram) {
+		mutex_unlock(&iommu_debug_lock);
+		return -EINVAL;
+	}
+
+	iotlb_cr_to_e(&cr, &e);
+	err = iopgtable_store_entry(obj, &e);
+	if (err)
+		dev_err(obj->dev, "%s: fail to store cr\n", __func__);
+
+	mutex_unlock(&iommu_debug_lock);
+	return count;
+}
+
+#define dump_ioptable_entry_one(lv, da, val)			\
+	({							\
+		int __err = 0;					\
+		ssize_t bytes;					\
+		const int maxcol = 22;				\
+		const char *str = "%d: %08x %08x\n";		\
+		bytes = snprintf(p, maxcol, str, lv, da, val);	\
+		p += bytes;					\
+		len -= bytes;					\
+		if (len < maxcol)				\
+			__err = -ENOMEM;			\
+		__err;						\
+	})
+
+static ssize_t dump_ioptable(struct iommu *obj, char *buf, ssize_t len)
+{
+	int i;
+	u32 *iopgd;
+	char *p = buf;
+
+	spin_lock(&obj->page_table_lock);
+
+	iopgd = iopgd_offset(obj, 0);
+	for (i = 0; i < PTRS_PER_IOPGD; i++, iopgd++) {
+		int j, err;
+		u32 *iopte;
+		u32 da;
+
+		if (!*iopgd)
+			continue;
+
+		if (!(*iopgd & IOPGD_TABLE)) {
+			da = i << IOPGD_SHIFT;
+
+			err = dump_ioptable_entry_one(1, da, *iopgd);
+			if (err)
+				goto out;
+			continue;
+		}
+
+		iopte = iopte_offset(iopgd, 0);
+
+		for (j = 0; j < PTRS_PER_IOPTE; j++, iopte++) {
+			if (!*iopte)
+				continue;
+
+			da = (i << IOPGD_SHIFT) + (j << IOPTE_SHIFT);
+			err = dump_ioptable_entry_one(2, da, *iopte);
+			if (err)
+				goto out;
+		}
+	}
+out:
+	spin_unlock(&obj->page_table_lock);
+
+	return p - buf;
+}
+
+static ssize_t debug_read_pagetable(struct file *file, char __user *userbuf,
+				    size_t count, loff_t *ppos)
+{
+	struct iommu *obj = file->private_data;
+	char *p, *buf;
+	size_t bytes;
+
+	buf = (char *)__get_free_page(GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+	p = buf;
+
+	p += sprintf(p, "L: %8s %8s\n", "da:", "pa:");
+	p += sprintf(p, "-----------------------------------------\n");
+
+	mutex_lock(&iommu_debug_lock);
+
+	bytes = PAGE_SIZE - (p - buf);
+	p += dump_ioptable(obj, p, bytes);
+
+	bytes = simple_read_from_buffer(userbuf, count, ppos, buf, p - buf);
+
+	mutex_unlock(&iommu_debug_lock);
+	free_page((unsigned long)buf);
+
+	return bytes;
+}
+
+static ssize_t debug_read_mmap(struct file *file, char __user *userbuf,
+			       size_t count, loff_t *ppos)
+{
+	struct iommu *obj = file->private_data;
+	char *p, *buf;
+	struct iovm_struct *tmp;
+	int uninitialized_var(i);
+	ssize_t bytes;
+
+	buf = (char *)__get_free_page(GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+	p = buf;
+
+	p += sprintf(p, "%-3s %-8s %-8s %6s %8s\n",
+		     "No", "start", "end", "size", "flags");
+	p += sprintf(p, "-------------------------------------------------\n");
+
+	mutex_lock(&iommu_debug_lock);
+
+	list_for_each_entry(tmp, &obj->mmap, list) {
+		size_t len;
+		const char *str = "%3d %08x-%08x %6x %8x\n";
+		const int maxcol = 39;
+
+		len = tmp->da_end - tmp->da_start;
+		p += snprintf(p, maxcol, str,
+			      i, tmp->da_start, tmp->da_end, len, tmp->flags);
+
+		if (PAGE_SIZE - (p - buf) < maxcol)
+			break;
+		i++;
+	}
+
+	bytes = simple_read_from_buffer(userbuf, count, ppos, buf, p - buf);
+
+	mutex_unlock(&iommu_debug_lock);
+	free_page((unsigned long)buf);
+
+	return bytes;
+}
+
+static ssize_t debug_read_mem(struct file *file, char __user *userbuf,
+			      size_t count, loff_t *ppos)
+{
+	struct iommu *obj = file->private_data;
+	char *p, *buf;
+	struct iovm_struct *area;
+	ssize_t bytes;
+
+	count = min_t(ssize_t, count, PAGE_SIZE);
+
+	buf = (char *)__get_free_page(GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+	p = buf;
+
+	mutex_lock(&iommu_debug_lock);
+
+	area = find_iovm_area(obj, (u32)ppos);
+	if (IS_ERR(area)) {
+		bytes = -EINVAL;
+		goto err_out;
+	}
+	memcpy(p, area->va, count);
+	p += count;
+
+	bytes = simple_read_from_buffer(userbuf, count, ppos, buf, p - buf);
+err_out:
+	mutex_unlock(&iommu_debug_lock);
+	free_page((unsigned long)buf);
+
+	return bytes;
+}
+
+static ssize_t debug_write_mem(struct file *file, const char __user *userbuf,
+			       size_t count, loff_t *ppos)
+{
+	struct iommu *obj = file->private_data;
+	struct iovm_struct *area;
+	char *p, *buf;
+
+	count = min_t(size_t, count, PAGE_SIZE);
+
+	buf = (char *)__get_free_page(GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+	p = buf;
+
+	mutex_lock(&iommu_debug_lock);
+
+	if (copy_from_user(p, userbuf, count)) {
+		count = -EFAULT;
+		goto err_out;
+	}
+
+	area = find_iovm_area(obj, (u32)ppos);
+	if (IS_ERR(area)) {
+		count = -EINVAL;
+		goto err_out;
+	}
+	memcpy(area->va, p, count);
+err_out:
+	mutex_unlock(&iommu_debug_lock);
+	free_page((unsigned long)buf);
+
+	return count;
+}
+
+static int debug_open_generic(struct inode *inode, struct file *file)
+{
+	file->private_data = inode->i_private;
+	return 0;
+}
+
+#define DEBUG_FOPS(name)						\
+	static const struct file_operations debug_##name##_fops = {	\
+		.open = debug_open_generic,				\
+		.read = debug_read_##name,				\
+		.write = debug_write_##name,				\
+		.llseek = generic_file_llseek,				\
+	};
+
+#define DEBUG_FOPS_RO(name)						\
+	static const struct file_operations debug_##name##_fops = {	\
+		.open = debug_open_generic,				\
+		.read = debug_read_##name,				\
+		.llseek = generic_file_llseek,				\
+	};
+
+DEBUG_FOPS_RO(ver);
+DEBUG_FOPS_RO(regs);
+DEBUG_FOPS_RO(tlb);
+DEBUG_FOPS(pagetable);
+DEBUG_FOPS_RO(mmap);
+DEBUG_FOPS(mem);
+
+#define __DEBUG_ADD_FILE(attr, mode)					\
+	{								\
+		struct dentry *dent;					\
+		dent = debugfs_create_file(#attr, mode, parent,		\
+					   obj, &debug_##attr##_fops);	\
+		if (!dent)						\
+			return -ENOMEM;					\
+	}
+
+#define DEBUG_ADD_FILE(name) __DEBUG_ADD_FILE(name, 600)
+#define DEBUG_ADD_FILE_RO(name) __DEBUG_ADD_FILE(name, 400)
+
+static int iommu_debug_register(struct device *dev, void *data)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct iommu *obj = platform_get_drvdata(pdev);
+	struct dentry *d, *parent;
+
+	if (!obj || !obj->dev)
+		return -EINVAL;
+
+	d = debugfs_create_dir(obj->name, iommu_debug_root);
+	if (!d)
+		return -ENOMEM;
+	parent = d;
+
+	d = debugfs_create_u8("nr_tlb_entries", 400, parent,
+			      (u8 *)&obj->nr_tlb_entries);
+	if (!d)
+		return -ENOMEM;
+
+	DEBUG_ADD_FILE_RO(ver);
+	DEBUG_ADD_FILE_RO(regs);
+	DEBUG_ADD_FILE_RO(tlb);
+	DEBUG_ADD_FILE(pagetable);
+	DEBUG_ADD_FILE_RO(mmap);
+	DEBUG_ADD_FILE(mem);
+
+	return 0;
+}
+
+static int __init iommu_debug_init(void)
+{
+	struct dentry *d;
+	int err;
+
+	d = debugfs_create_dir("iommu", NULL);
+	if (!d)
+		return -ENOMEM;
+	iommu_debug_root = d;
+
+	err = foreach_iommu_device(d, iommu_debug_register);
+	if (err)
+		goto err_out;
+	return 0;
+
+err_out:
+	debugfs_remove_recursive(iommu_debug_root);
+	return err;
+}
+module_init(iommu_debug_init)
+
+static void __exit iommu_debugfs_exit(void)
+{
+	debugfs_remove_recursive(iommu_debug_root);
+}
+module_exit(iommu_debugfs_exit)
+
+MODULE_DESCRIPTION("omap iommu: debugfs interface");
+MODULE_AUTHOR("Hiroshi DOYU <Hiroshi.DOYU@nokia.com>");
+MODULE_LICENSE("GPL v2");
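With CONFIG_OMAP_IOMMU_DEBUG enabled, the files registered above appear per-IOMMU under debugfs. A userspace sketch, assuming the conventional /sys/kernel/debug mount point and an instance directory named "isp" (both assumptions, not from the patch):

#include <stdio.h>

int main(void)
{
	char line[128];
	FILE *f = fopen("/sys/kernel/debug/iommu/isp/ver", "r");

	if (!f) {
		perror("fopen");
		return 1;
	}
	if (fgets(line, sizeof(line), f))	/* e.g. "H/W version: 1.1" */
		fputs(line, stdout);
	fclose(f);
	return 0;
}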
diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c
new file mode 100644
index 000000000000..bf8de6475746
--- /dev/null
+++ b/drivers/iommu/omap-iommu.c
@@ -0,0 +1,1326 @@
+/*
+ * omap iommu: tlb and pagetable primitives
+ *
+ * Copyright (C) 2008-2010 Nokia Corporation
+ *
+ * Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com>,
+ *		Paul Mundt and Toshihiro Kobayashi
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/clk.h>
+#include <linux/platform_device.h>
+#include <linux/iommu.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+
+#include <asm/cacheflush.h>
+
+#include <plat/iommu.h>
+
+#include <plat/iopgtable.h>
+
+#define for_each_iotlb_cr(obj, n, __i, cr)				\
+	for (__i = 0;							\
+	     (__i < (n)) && (cr = __iotlb_read_cr((obj), __i), true);	\
+	     __i++)
+
+/**
+ * struct omap_iommu_domain - omap iommu domain
+ * @pgtable:	the page table
+ * @iommu_dev:	an omap iommu device attached to this domain. only a single
+ *		iommu device can be attached for now.
+ * @lock:	domain lock, should be taken when attaching/detaching
+ */
+struct omap_iommu_domain {
+	u32 *pgtable;
+	struct iommu *iommu_dev;
+	spinlock_t lock;
+};
+
+/* accommodate the difference between omap1 and omap2/3 */
+static const struct iommu_functions *arch_iommu;
+
+static struct platform_driver omap_iommu_driver;
+static struct kmem_cache *iopte_cachep;
+
+/**
+ * install_iommu_arch - Install architecture-specific iommu functions
+ * @ops:	a pointer to architecture specific iommu functions
+ *
+ * There are several kinds of iommu algorithm (tlb, pagetable) among
+ * omap series. This interface installs such an iommu algorithm.
+ **/
+int install_iommu_arch(const struct iommu_functions *ops)
+{
+	if (arch_iommu)
+		return -EBUSY;
+
+	arch_iommu = ops;
+	return 0;
+}
+EXPORT_SYMBOL_GPL(install_iommu_arch);
+
+/**
+ * uninstall_iommu_arch - Uninstall architecture-specific iommu functions
+ * @ops:	a pointer to architecture specific iommu functions
+ *
+ * This interface uninstalls the iommu algorithm installed previously.
+ **/
+void uninstall_iommu_arch(const struct iommu_functions *ops)
+{
+	if (arch_iommu != ops)
+		pr_err("%s: not your arch\n", __func__);
+
+	arch_iommu = NULL;
+}
+EXPORT_SYMBOL_GPL(uninstall_iommu_arch);
+
+/**
+ * iommu_save_ctx - Save registers for pm off-mode support
+ * @obj:	target iommu
+ **/
+void iommu_save_ctx(struct iommu *obj)
+{
+	arch_iommu->save_ctx(obj);
+}
+EXPORT_SYMBOL_GPL(iommu_save_ctx);
+
+/**
+ * iommu_restore_ctx - Restore registers for pm off-mode support
+ * @obj:	target iommu
+ **/
+void iommu_restore_ctx(struct iommu *obj)
+{
+	arch_iommu->restore_ctx(obj);
+}
+EXPORT_SYMBOL_GPL(iommu_restore_ctx);
+
+/**
+ * iommu_arch_version - Return running iommu arch version
+ **/
+u32 iommu_arch_version(void)
+{
+	return arch_iommu->version;
+}
+EXPORT_SYMBOL_GPL(iommu_arch_version);
+
+static int iommu_enable(struct iommu *obj)
+{
+	int err;
+
+	if (!obj)
+		return -EINVAL;
+
+	if (!arch_iommu)
+		return -ENODEV;
+
+	clk_enable(obj->clk);
+
+	err = arch_iommu->enable(obj);
+
+	clk_disable(obj->clk);
+	return err;
+}
+
+static void iommu_disable(struct iommu *obj)
+{
+	if (!obj)
+		return;
+
+	clk_enable(obj->clk);
+
+	arch_iommu->disable(obj);
+
+	clk_disable(obj->clk);
+}
+
+/*
+ *	TLB operations
+ */
+void iotlb_cr_to_e(struct cr_regs *cr, struct iotlb_entry *e)
+{
+	BUG_ON(!cr || !e);
+
+	arch_iommu->cr_to_e(cr, e);
+}
+EXPORT_SYMBOL_GPL(iotlb_cr_to_e);
+
+static inline int iotlb_cr_valid(struct cr_regs *cr)
+{
+	if (!cr)
+		return -EINVAL;
+
+	return arch_iommu->cr_valid(cr);
+}
+
+static inline struct cr_regs *iotlb_alloc_cr(struct iommu *obj,
+					     struct iotlb_entry *e)
+{
+	if (!e)
+		return NULL;
+
+	return arch_iommu->alloc_cr(obj, e);
+}
+
+u32 iotlb_cr_to_virt(struct cr_regs *cr)
+{
+	return arch_iommu->cr_to_virt(cr);
+}
+EXPORT_SYMBOL_GPL(iotlb_cr_to_virt);
+
+static u32 get_iopte_attr(struct iotlb_entry *e)
+{
+	return arch_iommu->get_pte_attr(e);
+}
+
+static u32 iommu_report_fault(struct iommu *obj, u32 *da)
+{
+	return arch_iommu->fault_isr(obj, da);
+}
+
+static void iotlb_lock_get(struct iommu *obj, struct iotlb_lock *l)
+{
+	u32 val;
+
+	val = iommu_read_reg(obj, MMU_LOCK);
+
+	l->base = MMU_LOCK_BASE(val);
+	l->vict = MMU_LOCK_VICT(val);
+}
+
+static void iotlb_lock_set(struct iommu *obj, struct iotlb_lock *l)
+{
+	u32 val;
+
+	val = (l->base << MMU_LOCK_BASE_SHIFT);
+	val |= (l->vict << MMU_LOCK_VICT_SHIFT);
+
+	iommu_write_reg(obj, val, MMU_LOCK);
+}
+
+static void iotlb_read_cr(struct iommu *obj, struct cr_regs *cr)
+{
+	arch_iommu->tlb_read_cr(obj, cr);
+}
+
+static void iotlb_load_cr(struct iommu *obj, struct cr_regs *cr)
+{
+	arch_iommu->tlb_load_cr(obj, cr);
+
+	iommu_write_reg(obj, 1, MMU_FLUSH_ENTRY);
+	iommu_write_reg(obj, 1, MMU_LD_TLB);
+}
+
+/**
+ * iotlb_dump_cr - Dump an iommu tlb entry into buf
+ * @obj:	target iommu
+ * @cr:		contents of cam and ram register
+ * @buf:	output buffer
+ **/
+static inline ssize_t iotlb_dump_cr(struct iommu *obj, struct cr_regs *cr,
+				    char *buf)
+{
+	BUG_ON(!cr || !buf);
+
+	return arch_iommu->dump_cr(obj, cr, buf);
+}
+
+/* only used in iotlb iteration for-loop */
+static struct cr_regs __iotlb_read_cr(struct iommu *obj, int n)
+{
+	struct cr_regs cr;
+	struct iotlb_lock l;
+
+	iotlb_lock_get(obj, &l);
+	l.vict = n;
+	iotlb_lock_set(obj, &l);
+	iotlb_read_cr(obj, &cr);
+
+	return cr;
+}
+
+/**
+ * load_iotlb_entry - Set an iommu tlb entry
+ * @obj:	target iommu
+ * @e:		an iommu tlb entry info
+ **/
+int load_iotlb_entry(struct iommu *obj, struct iotlb_entry *e)
+{
+	int err = 0;
+	struct iotlb_lock l;
+	struct cr_regs *cr;
+
+	if (!obj || !obj->nr_tlb_entries || !e)
+		return -EINVAL;
+
+	clk_enable(obj->clk);
+
+	iotlb_lock_get(obj, &l);
+	if (l.base == obj->nr_tlb_entries) {
+		dev_warn(obj->dev, "%s: preserve entries full\n", __func__);
+		err = -EBUSY;
+		goto out;
+	}
+	if (!e->prsvd) {
+		int i;
+		struct cr_regs tmp;
+
+		for_each_iotlb_cr(obj, obj->nr_tlb_entries, i, tmp)
+			if (!iotlb_cr_valid(&tmp))
+				break;
+
+		if (i == obj->nr_tlb_entries) {
+			dev_dbg(obj->dev, "%s: full: no entry\n", __func__);
+			err = -EBUSY;
+			goto out;
+		}
+
+		iotlb_lock_get(obj, &l);
+	} else {
+		l.vict = l.base;
+		iotlb_lock_set(obj, &l);
+	}
+
+	cr = iotlb_alloc_cr(obj, e);
+	if (IS_ERR(cr)) {
+		clk_disable(obj->clk);
+		return PTR_ERR(cr);
+	}
+
+	iotlb_load_cr(obj, cr);
+	kfree(cr);
+
+	if (e->prsvd)
+		l.base++;
+	/* increment victim for next tlb load */
+	if (++l.vict == obj->nr_tlb_entries)
+		l.vict = l.base;
+	iotlb_lock_set(obj, &l);
+out:
+	clk_disable(obj->clk);
+	return err;
+}
+EXPORT_SYMBOL_GPL(load_iotlb_entry);
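The MMU_LOCK register splits the TLB into a preserved region below 'base' and a rotating victim region above it; load_iotlb_entry() bumps 'base' for entries flagged prsvd. A sketch of pinning one small page — only the da/pa/pgsz/prsvd fields appear in this file, so anything further about struct iotlb_entry is assumed from <plat/iommu.h>:

static int demo_pin_page(struct iommu *obj, u32 da, u32 pa)
{
	struct iotlb_entry e = {
		.da	= da,
		.pa	= pa,
		.pgsz	= MMU_CAM_PGSZ_4K,	/* one small page */
		.prsvd	= 1,			/* keep below the lock base */
	};

	/* raises l.base so victim rotation never evicts this entry */
	return load_iotlb_entry(obj, &e);
}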
+
+/**
+ * flush_iotlb_page - Clear an iommu tlb entry
+ * @obj:	target iommu
+ * @da:		iommu device virtual address
+ *
+ * Clear an iommu tlb entry which includes 'da' address.
+ **/
+void flush_iotlb_page(struct iommu *obj, u32 da)
+{
+	int i;
+	struct cr_regs cr;
+
+	clk_enable(obj->clk);
+
+	for_each_iotlb_cr(obj, obj->nr_tlb_entries, i, cr) {
+		u32 start;
+		size_t bytes;
+
+		if (!iotlb_cr_valid(&cr))
+			continue;
+
+		start = iotlb_cr_to_virt(&cr);
+		bytes = iopgsz_to_bytes(cr.cam & 3);
+
+		if ((start <= da) && (da < start + bytes)) {
+			dev_dbg(obj->dev, "%s: %08x<=%08x(%x)\n",
+				__func__, start, da, bytes);
+			iotlb_load_cr(obj, &cr);
+			iommu_write_reg(obj, 1, MMU_FLUSH_ENTRY);
+		}
+	}
+	clk_disable(obj->clk);
+
+	if (i == obj->nr_tlb_entries)
+		dev_dbg(obj->dev, "%s: no page for %08x\n", __func__, da);
+}
+EXPORT_SYMBOL_GPL(flush_iotlb_page);
+
+/**
+ * flush_iotlb_range - Clear iommu tlb entries
+ * @obj:	target iommu
+ * @start:	iommu device virtual address (start)
+ * @end:	iommu device virtual address (end)
+ *
+ * Clear iommu tlb entries within the [start, end) 'da' range.
+ **/
+void flush_iotlb_range(struct iommu *obj, u32 start, u32 end)
+{
+	u32 da = start;
+
+	while (da < end) {
+		flush_iotlb_page(obj, da);
+		/* FIXME: Optimize for multiple page size */
+		da += IOPTE_SIZE;
+	}
+}
+EXPORT_SYMBOL_GPL(flush_iotlb_range);
+
+/**
+ * flush_iotlb_all - Clear all iommu tlb entries
+ * @obj:	target iommu
+ **/
+void flush_iotlb_all(struct iommu *obj)
+{
+	struct iotlb_lock l;
+
+	clk_enable(obj->clk);
+
+	l.base = 0;
+	l.vict = 0;
+	iotlb_lock_set(obj, &l);
+
+	iommu_write_reg(obj, 1, MMU_GFLUSH);
+
+	clk_disable(obj->clk);
+}
+EXPORT_SYMBOL_GPL(flush_iotlb_all);
+
+/**
+ * iommu_set_twl - enable/disable table walking logic
+ * @obj:	target iommu
+ * @on:		enable/disable
+ *
+ * Function used to enable/disable TWL. If one wants to work
+ * exclusively with locked TLB entries and receive notifications
+ * for TLB miss then call this function to disable TWL.
+ */
+void iommu_set_twl(struct iommu *obj, bool on)
+{
+	clk_enable(obj->clk);
+	arch_iommu->set_twl(obj, on);
+	clk_disable(obj->clk);
+}
+EXPORT_SYMBOL_GPL(iommu_set_twl);
+
+#if defined(CONFIG_OMAP_IOMMU_DEBUG_MODULE)
+
+ssize_t iommu_dump_ctx(struct iommu *obj, char *buf, ssize_t bytes)
+{
+	if (!obj || !buf)
+		return -EINVAL;
+
+	clk_enable(obj->clk);
+
+	bytes = arch_iommu->dump_ctx(obj, buf, bytes);
+
+	clk_disable(obj->clk);
+
+	return bytes;
+}
+EXPORT_SYMBOL_GPL(iommu_dump_ctx);
+
+static int __dump_tlb_entries(struct iommu *obj, struct cr_regs *crs, int num)
+{
+	int i;
+	struct iotlb_lock saved;
+	struct cr_regs tmp;
+	struct cr_regs *p = crs;
+
+	clk_enable(obj->clk);
+	iotlb_lock_get(obj, &saved);
+
+	for_each_iotlb_cr(obj, num, i, tmp) {
+		if (!iotlb_cr_valid(&tmp))
+			continue;
+		*p++ = tmp;
+	}
+
+	iotlb_lock_set(obj, &saved);
+	clk_disable(obj->clk);
+
+	return p - crs;
+}
+
+/**
+ * dump_tlb_entries - dump cr arrays to given buffer
+ * @obj:	target iommu
+ * @buf:	output buffer
+ **/
+size_t dump_tlb_entries(struct iommu *obj, char *buf, ssize_t bytes)
+{
+	int i, num;
+	struct cr_regs *cr;
+	char *p = buf;
+
+	num = bytes / sizeof(*cr);
+	num = min(obj->nr_tlb_entries, num);
+
+	cr = kcalloc(num, sizeof(*cr), GFP_KERNEL);
+	if (!cr)
+		return 0;
+
+	num = __dump_tlb_entries(obj, cr, num);
+	for (i = 0; i < num; i++)
+		p += iotlb_dump_cr(obj, cr + i, p);
+	kfree(cr);
+
+	return p - buf;
+}
+EXPORT_SYMBOL_GPL(dump_tlb_entries);
+
+int foreach_iommu_device(void *data, int (*fn)(struct device *, void *))
+{
+	return driver_for_each_device(&omap_iommu_driver.driver,
+				      NULL, data, fn);
+}
+EXPORT_SYMBOL_GPL(foreach_iommu_device);
+
+#endif /* CONFIG_OMAP_IOMMU_DEBUG_MODULE */
+
+/*
+ *	H/W pagetable operations
+ */
+static void flush_iopgd_range(u32 *first, u32 *last)
+{
+	/* FIXME: L2 cache should be taken care of if it exists */
+	do {
+		asm("mcr	p15, 0, %0, c7, c10, 1 @ flush_pgd"
+		    : : "r" (first));
+		first += L1_CACHE_BYTES / sizeof(*first);
+	} while (first <= last);
+}
+
+static void flush_iopte_range(u32 *first, u32 *last)
+{
+	/* FIXME: L2 cache should be taken care of if it exists */
+	do {
+		asm("mcr	p15, 0, %0, c7, c10, 1 @ flush_pte"
+		    : : "r" (first));
+		first += L1_CACHE_BYTES / sizeof(*first);
+	} while (first <= last);
+}
+
+static void iopte_free(u32 *iopte)
+{
+	/* Note: freed iopte's must be clean ready for re-use */
+	kmem_cache_free(iopte_cachep, iopte);
+}
+
+static u32 *iopte_alloc(struct iommu *obj, u32 *iopgd, u32 da)
+{
+	u32 *iopte;
+
+	/* a table already exists */
+	if (*iopgd)
+		goto pte_ready;
+
+	/*
+	 * do the allocation outside the page table lock
+	 */
+	spin_unlock(&obj->page_table_lock);
+	iopte = kmem_cache_zalloc(iopte_cachep, GFP_KERNEL);
+	spin_lock(&obj->page_table_lock);
+
+	if (!*iopgd) {
+		if (!iopte)
+			return ERR_PTR(-ENOMEM);
+
+		*iopgd = virt_to_phys(iopte) | IOPGD_TABLE;
+		flush_iopgd_range(iopgd, iopgd);
+
+		dev_vdbg(obj->dev, "%s: a new pte:%p\n", __func__, iopte);
+	} else {
+		/* We raced, free the redundant table */
+		iopte_free(iopte);
+	}
+
+pte_ready:
+	iopte = iopte_offset(iopgd, da);
+
+	dev_vdbg(obj->dev,
+		 "%s: da:%08x pgd:%p *pgd:%08x pte:%p *pte:%08x\n",
+		 __func__, da, iopgd, *iopgd, iopte, *iopte);
+
+	return iopte;
+}
+
+static int iopgd_alloc_section(struct iommu *obj, u32 da, u32 pa, u32 prot)
+{
+	u32 *iopgd = iopgd_offset(obj, da);
+
+	if ((da | pa) & ~IOSECTION_MASK) {
+		dev_err(obj->dev, "%s: %08x:%08x should be aligned on %08lx\n",
+			__func__, da, pa, IOSECTION_SIZE);
+		return -EINVAL;
+	}
+
+	*iopgd = (pa & IOSECTION_MASK) | prot | IOPGD_SECTION;
+	flush_iopgd_range(iopgd, iopgd);
+	return 0;
+}
+
+static int iopgd_alloc_super(struct iommu *obj, u32 da, u32 pa, u32 prot)
+{
+	u32 *iopgd = iopgd_offset(obj, da);
+	int i;
+
+	if ((da | pa) & ~IOSUPER_MASK) {
+		dev_err(obj->dev, "%s: %08x:%08x should be aligned on %08lx\n",
+			__func__, da, pa, IOSUPER_SIZE);
+		return -EINVAL;
+	}
+
+	for (i = 0; i < 16; i++)
+		*(iopgd + i) = (pa & IOSUPER_MASK) | prot | IOPGD_SUPER;
+	flush_iopgd_range(iopgd, iopgd + 15);
+	return 0;
+}
+
+static int iopte_alloc_page(struct iommu *obj, u32 da, u32 pa, u32 prot)
+{
+	u32 *iopgd = iopgd_offset(obj, da);
+	u32 *iopte = iopte_alloc(obj, iopgd, da);
+
+	if (IS_ERR(iopte))
+		return PTR_ERR(iopte);
+
+	*iopte = (pa & IOPAGE_MASK) | prot | IOPTE_SMALL;
+	flush_iopte_range(iopte, iopte);
+
+	dev_vdbg(obj->dev, "%s: da:%08x pa:%08x pte:%p *pte:%08x\n",
+		 __func__, da, pa, iopte, *iopte);
+
+	return 0;
+}
+
+static int iopte_alloc_large(struct iommu *obj, u32 da, u32 pa, u32 prot)
+{
+	u32 *iopgd = iopgd_offset(obj, da);
+	u32 *iopte = iopte_alloc(obj, iopgd, da);
+	int i;
+
+	if ((da | pa) & ~IOLARGE_MASK) {
+		dev_err(obj->dev, "%s: %08x:%08x should be aligned on %08lx\n",
+			__func__, da, pa, IOLARGE_SIZE);
+		return -EINVAL;
+	}
+
+	if (IS_ERR(iopte))
+		return PTR_ERR(iopte);
+
+	for (i = 0; i < 16; i++)
+		*(iopte + i) = (pa & IOLARGE_MASK) | prot | IOPTE_LARGE;
+	flush_iopte_range(iopte, iopte + 15);
+	return 0;
+}
+
+static int iopgtable_store_entry_core(struct iommu *obj, struct iotlb_entry *e)
+{
+	int (*fn)(struct iommu *, u32, u32, u32);
+	u32 prot;
+	int err;
+
+	if (!obj || !e)
+		return -EINVAL;
+
+	switch (e->pgsz) {
+	case MMU_CAM_PGSZ_16M:
+		fn = iopgd_alloc_super;
+		break;
+	case MMU_CAM_PGSZ_1M:
+		fn = iopgd_alloc_section;
+		break;
+	case MMU_CAM_PGSZ_64K:
+		fn = iopte_alloc_large;
+		break;
+	case MMU_CAM_PGSZ_4K:
+		fn = iopte_alloc_page;
+		break;
+	default:
+		fn = NULL;
+		BUG();
+		break;
+	}
+
+	prot = get_iopte_attr(e);
+
+	spin_lock(&obj->page_table_lock);
+	err = fn(obj, e->da, e->pa, prot);
+	spin_unlock(&obj->page_table_lock);
+
+	return err;
+}
+
+/**
+ * iopgtable_store_entry - Make an iommu pte entry
+ * @obj:	target iommu
+ * @e:		an iommu tlb entry info
+ **/
+int iopgtable_store_entry(struct iommu *obj, struct iotlb_entry *e)
+{
+	int err;
+
+	flush_iotlb_page(obj, e->da);
+	err = iopgtable_store_entry_core(obj, e);
+#ifdef PREFETCH_IOTLB
+	if (!err)
+		load_iotlb_entry(obj, e);
+#endif
+	return err;
+}
+EXPORT_SYMBOL_GPL(iopgtable_store_entry);
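The table is the classic ARM-style two-level layout: a 16MB supersection fills 16 consecutive L1 slots and a 64KB large page fills 16 consecutive L2 slots, as the loops above show. A standalone worked example of the index math, assuming the usual OMAP shift values (IOPGD_SHIFT = 20, IOPTE_SHIFT = 12 — taken from <plat/iopgtable.h>, not spelled out in this diff):

#include <stdio.h>

int main(void)
{
	unsigned int da = 0x12345000;

	/* one L1 (pgd) slot covers 1MB; a 16MB supersection fills 16 slots */
	unsigned int l1 = da >> 20;		/* 0x123 */
	/* one L2 (pte) slot covers 4KB; a 64KB large page fills 16 slots */
	unsigned int l2 = (da >> 12) & 0xff;	/* 0x45 */

	printf("da %#x -> pgd[%#x] -> pte[%#x]\n", da, l1, l2);
	return 0;
}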
+
+/**
+ * iopgtable_lookup_entry - Lookup an iommu pte entry
+ * @obj:	target iommu
+ * @da:		iommu device virtual address
+ * @ppgd:	iommu pgd entry pointer to be returned
+ * @ppte:	iommu pte entry pointer to be returned
+ **/
+void iopgtable_lookup_entry(struct iommu *obj, u32 da, u32 **ppgd, u32 **ppte)
+{
+	u32 *iopgd, *iopte = NULL;
+
+	iopgd = iopgd_offset(obj, da);
+	if (!*iopgd)
+		goto out;
+
+	if (iopgd_is_table(*iopgd))
+		iopte = iopte_offset(iopgd, da);
+out:
+	*ppgd = iopgd;
+	*ppte = iopte;
+}
+EXPORT_SYMBOL_GPL(iopgtable_lookup_entry);
+
+static size_t iopgtable_clear_entry_core(struct iommu *obj, u32 da)
+{
+	size_t bytes;
+	u32 *iopgd = iopgd_offset(obj, da);
+	int nent = 1;
+
+	if (!*iopgd)
+		return 0;
+
+	if (iopgd_is_table(*iopgd)) {
+		int i;
+		u32 *iopte = iopte_offset(iopgd, da);
+
+		bytes = IOPTE_SIZE;
+		if (*iopte & IOPTE_LARGE) {
+			nent *= 16;
+			/* rewind to the 1st entry */
+			iopte = iopte_offset(iopgd, (da & IOLARGE_MASK));
+		}
+		bytes *= nent;
+		memset(iopte, 0, nent * sizeof(*iopte));
+		flush_iopte_range(iopte, iopte + (nent - 1) * sizeof(*iopte));
+
+		/*
+		 * do table walk to check if this table is necessary or not
+		 */
+		iopte = iopte_offset(iopgd, 0);
+		for (i = 0; i < PTRS_PER_IOPTE; i++)
+			if (iopte[i])
+				goto out;
+
+		iopte_free(iopte);
+		nent = 1; /* for the next L1 entry */
+	} else {
+		bytes = IOPGD_SIZE;
+		if ((*iopgd & IOPGD_SUPER) == IOPGD_SUPER) {
+			nent *= 16;
+			/* rewind to the 1st entry */
+			iopgd = iopgd_offset(obj, (da & IOSUPER_MASK));
+		}
+		bytes *= nent;
+	}
+	memset(iopgd, 0, nent * sizeof(*iopgd));
+	flush_iopgd_range(iopgd, iopgd + (nent - 1) * sizeof(*iopgd));
+out:
+	return bytes;
+}
+
+/**
+ * iopgtable_clear_entry - Remove an iommu pte entry
+ * @obj:	target iommu
+ * @da:		iommu device virtual address
+ **/
+size_t iopgtable_clear_entry(struct iommu *obj, u32 da)
+{
+	size_t bytes;
+
+	spin_lock(&obj->page_table_lock);
+
+	bytes = iopgtable_clear_entry_core(obj, da);
+	flush_iotlb_page(obj, da);
+
+	spin_unlock(&obj->page_table_lock);
+
+	return bytes;
+}
+EXPORT_SYMBOL_GPL(iopgtable_clear_entry);
+
+static void iopgtable_clear_entry_all(struct iommu *obj)
+{
+	int i;
+
+	spin_lock(&obj->page_table_lock);
+
+	for (i = 0; i < PTRS_PER_IOPGD; i++) {
+		u32 da;
+		u32 *iopgd;
+
+		da = i << IOPGD_SHIFT;
+		iopgd = iopgd_offset(obj, da);
+
+		if (!*iopgd)
+			continue;
+
+		if (iopgd_is_table(*iopgd))
+			iopte_free(iopte_offset(iopgd, 0));
+
+		*iopgd = 0;
+		flush_iopgd_range(iopgd, iopgd);
+	}
+
+	flush_iotlb_all(obj);
+
+	spin_unlock(&obj->page_table_lock);
+}
+
+/*
+ *	Device IOMMU generic operations
+ */
+static irqreturn_t iommu_fault_handler(int irq, void *data)
+{
+	u32 da, errs;
+	u32 *iopgd, *iopte;
+	struct iommu *obj = data;
+
+	if (!obj->refcount)
+		return IRQ_NONE;
+
+	clk_enable(obj->clk);
+	errs = iommu_report_fault(obj, &da);
+	clk_disable(obj->clk);
+	if (errs == 0)
+		return IRQ_HANDLED;
+
+	/* Fault callback or TLB/PTE Dynamic loading */
+	if (obj->isr && !obj->isr(obj, da, errs, obj->isr_priv))
+		return IRQ_HANDLED;
+
+	iommu_disable(obj);
+
+	iopgd = iopgd_offset(obj, da);
+
+	if (!iopgd_is_table(*iopgd)) {
+		dev_err(obj->dev, "%s: errs:0x%08x da:0x%08x pgd:0x%p "
+			"*pgd:0x%08x\n", obj->name, errs, da, iopgd, *iopgd);
+		return IRQ_NONE;
+	}
+
+	iopte = iopte_offset(iopgd, da);
+
+	dev_err(obj->dev, "%s: errs:0x%08x da:0x%08x pgd:0x%p *pgd:0x%08x "
+		"pte:0x%p *pte:0x%08x\n", obj->name, errs, da, iopgd, *iopgd,
+		iopte, *iopte);
+
+	return IRQ_NONE;
+}
+
+static int device_match_by_alias(struct device *dev, void *data)
+{
+	struct iommu *obj = to_iommu(dev);
+	const char *name = data;
+
+	pr_debug("%s: %s %s\n", __func__, obj->name, name);
+
+	return strcmp(obj->name, name) == 0;
+}
+
+/**
+ * iommu_set_da_range - Set a valid device address range
+ * @obj:	target iommu
+ * @start:	Start of valid range
+ * @end:	End of valid range
+ **/
+int iommu_set_da_range(struct iommu *obj, u32 start, u32 end)
+{
+
+	if (!obj)
+		return -EFAULT;
+
+	if (end < start || !PAGE_ALIGN(start | end))
+		return -EINVAL;
+
+	obj->da_start = start;
+	obj->da_end = end;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(iommu_set_da_range);
+
+/**
+ * omap_find_iommu_device() - find an omap iommu device by name
+ * @name:	name of the iommu device
+ *
+ * The generic iommu API requires the caller to provide the device
+ * it wishes to attach to a certain iommu domain.
+ *
+ * Drivers generally should not bother with this as it should just
+ * be taken care of by the DMA-API using dev_archdata.
+ *
+ * This function is provided as an interim solution until the latter
+ * materializes, and omap3isp is fully migrated to the DMA-API.
+ */
+struct device *omap_find_iommu_device(const char *name)
+{
+	return driver_find_device(&omap_iommu_driver.driver, NULL,
+				  (void *)name,
+				  device_match_by_alias);
+}
+EXPORT_SYMBOL_GPL(omap_find_iommu_device);
+
+/**
+ * omap_iommu_attach() - attach iommu device to an iommu domain
+ * @dev:	target omap iommu device
+ * @iopgd:	page table
+ **/
+static struct iommu *omap_iommu_attach(struct device *dev, u32 *iopgd)
+{
+	int err = -ENOMEM;
+	struct iommu *obj = to_iommu(dev);
+
+	spin_lock(&obj->iommu_lock);
+
+	/* an iommu device can only be attached once */
+	if (++obj->refcount > 1) {
+		dev_err(dev, "%s: already attached!\n", obj->name);
+		err = -EBUSY;
+		goto err_enable;
+	}
+
+	obj->iopgd = iopgd;
+	err = iommu_enable(obj);
+	if (err)
+		goto err_enable;
+	flush_iotlb_all(obj);
+
+	if (!try_module_get(obj->owner))
+		goto err_module;
+
+	spin_unlock(&obj->iommu_lock);
+
+	dev_dbg(obj->dev, "%s: %s\n", __func__, obj->name);
+	return obj;
+
+err_module:
+	if (obj->refcount == 1)
+		iommu_disable(obj);
+err_enable:
+	obj->refcount--;
+	spin_unlock(&obj->iommu_lock);
+	return ERR_PTR(err);
+}
+
+/**
+ * omap_iommu_detach - release iommu device
+ * @obj:	target iommu
+ **/
+static void omap_iommu_detach(struct iommu *obj)
+{
+	if (!obj || IS_ERR(obj))
+		return;
+
+	spin_lock(&obj->iommu_lock);
+
+	if (--obj->refcount == 0)
+		iommu_disable(obj);
+
+	module_put(obj->owner);
+
+	obj->iopgd = NULL;
+
+	spin_unlock(&obj->iommu_lock);
+
+	dev_dbg(obj->dev, "%s: %s\n", __func__, obj->name);
+}
+
+int iommu_set_isr(const char *name,
+		  int (*isr)(struct iommu *obj, u32 da, u32 iommu_errs,
+			     void *priv),
+		  void *isr_priv)
+{
+	struct device *dev;
+	struct iommu *obj;
+
+	dev = driver_find_device(&omap_iommu_driver.driver, NULL, (void *)name,
+				 device_match_by_alias);
+	if (!dev)
+		return -ENODEV;
+
+	obj = to_iommu(dev);
+	mutex_lock(&obj->iommu_lock);
+	if (obj->refcount != 0) {
+		mutex_unlock(&obj->iommu_lock);
+		return -EBUSY;
+	}
+	obj->isr = isr;
+	obj->isr_priv = isr_priv;
+	mutex_unlock(&obj->iommu_lock);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(iommu_set_isr);
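A sketch of hooking the fault path through iommu_set_isr(); the instance name "isp" and the recovery policy are made up. Returning 0 from the callback makes iommu_fault_handler() above report IRQ_HANDLED instead of dumping the page table:

static int demo_fault(struct iommu *obj, u32 da, u32 errs, void *priv)
{
	pr_warn("iommu %s: fault at da 0x%08x, errs 0x%08x\n",
		obj->name, da, errs);
	return -EINVAL;	/* non-zero: fall through to the default dump */
}

static int __init demo_hook_fault(void)
{
	return iommu_set_isr("isp", demo_fault, NULL);
}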
+
+/*
+ *	OMAP Device MMU(IOMMU) detection
+ */
+static int __devinit omap_iommu_probe(struct platform_device *pdev)
+{
+	int err = -ENODEV;
+	int irq;
+	struct iommu *obj;
+	struct resource *res;
+	struct iommu_platform_data *pdata = pdev->dev.platform_data;
+
+	if (pdev->num_resources != 2)
+		return -EINVAL;
+
+	obj = kzalloc(sizeof(*obj) + MMU_REG_SIZE, GFP_KERNEL);
+	if (!obj)
+		return -ENOMEM;
+
+	obj->clk = clk_get(&pdev->dev, pdata->clk_name);
+	if (IS_ERR(obj->clk))
+		goto err_clk;
+
+	obj->nr_tlb_entries = pdata->nr_tlb_entries;
+	obj->name = pdata->name;
+	obj->dev = &pdev->dev;
+	obj->ctx = (void *)obj + sizeof(*obj);
+	obj->da_start = pdata->da_start;
+	obj->da_end = pdata->da_end;
+
+	spin_lock_init(&obj->iommu_lock);
+	mutex_init(&obj->mmap_lock);
+	spin_lock_init(&obj->page_table_lock);
+	INIT_LIST_HEAD(&obj->mmap);
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res) {
+		err = -ENODEV;
+		goto err_mem;
+	}
+
+	res = request_mem_region(res->start, resource_size(res),
+				 dev_name(&pdev->dev));
+	if (!res) {
+		err = -EIO;
+		goto err_mem;
+	}
+
+	obj->regbase = ioremap(res->start, resource_size(res));
+	if (!obj->regbase) {
+		err = -ENOMEM;
+		goto err_ioremap;
+	}
+
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0) {
+		err = -ENODEV;
+		goto err_irq;
+	}
+	err = request_irq(irq, iommu_fault_handler, IRQF_SHARED,
+			  dev_name(&pdev->dev), obj);
+	if (err < 0)
+		goto err_irq;
+	platform_set_drvdata(pdev, obj);
+
+	dev_info(&pdev->dev, "%s registered\n", obj->name);
+	return 0;
+
+err_irq:
+	iounmap(obj->regbase);
+err_ioremap:
+	release_mem_region(res->start, resource_size(res));
+err_mem:
+	clk_put(obj->clk);
+err_clk:
+	kfree(obj);
+	return err;
+}
+
+static int __devexit omap_iommu_remove(struct platform_device *pdev)
+{
+	int irq;
+	struct resource *res;
+	struct iommu *obj = platform_get_drvdata(pdev);
+
+	platform_set_drvdata(pdev, NULL);
+
+	iopgtable_clear_entry_all(obj);
+
+	irq = platform_get_irq(pdev, 0);
+	free_irq(irq, obj);
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	release_mem_region(res->start, resource_size(res));
+	iounmap(obj->regbase);
+
+	clk_put(obj->clk);
+	dev_info(&pdev->dev, "%s removed\n", obj->name);
+	kfree(obj);
+	return 0;
+}
+
+static struct platform_driver omap_iommu_driver = {
+	.probe	= omap_iommu_probe,
+	.remove	= __devexit_p(omap_iommu_remove),
+	.driver	= {
+		.name	= "omap-iommu",
+	},
+};
+
+static void iopte_cachep_ctor(void *iopte)
+{
+	clean_dcache_area(iopte, IOPTE_TABLE_SIZE);
+}
+
+static int omap_iommu_map(struct iommu_domain *domain, unsigned long da,
+			  phys_addr_t pa, int order, int prot)
+{
+	struct omap_iommu_domain *omap_domain = domain->priv;
+	struct iommu *oiommu = omap_domain->iommu_dev;
+	struct device *dev = oiommu->dev;
+	size_t bytes = PAGE_SIZE << order;
+	struct iotlb_entry e;
+	int omap_pgsz;
+	u32 ret, flags;
+
+	/* we only support mapping a single iommu page for now */
+	omap_pgsz = bytes_to_iopgsz(bytes);
+	if (omap_pgsz < 0) {
+		dev_err(dev, "invalid size to map: %d\n", bytes);
+		return -EINVAL;
+	}
+
+	dev_dbg(dev, "mapping da 0x%lx to pa 0x%x size 0x%x\n", da, pa, bytes);
+
+	flags = omap_pgsz | prot;
+
+	iotlb_init_entry(&e, da, pa, flags);
+
+	ret = iopgtable_store_entry(oiommu, &e);
+	if (ret) {
+		dev_err(dev, "iopgtable_store_entry failed: %d\n", ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+static int omap_iommu_unmap(struct iommu_domain *domain, unsigned long da,
+			    int order)
+{
+	struct omap_iommu_domain *omap_domain = domain->priv;
+	struct iommu *oiommu = omap_domain->iommu_dev;
+	struct device *dev = oiommu->dev;
+	size_t bytes = PAGE_SIZE << order;
+	size_t ret;
+
+	dev_dbg(dev, "unmapping da 0x%lx size 0x%x\n", da, bytes);
+
+	ret = iopgtable_clear_entry(oiommu, da);
+	if (ret != bytes) {
+		dev_err(dev, "entry @ 0x%lx was %d; not %d\n", da, ret, bytes);
+		return -EINVAL;
+	}
+
+	return 0;
+}
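Note that the generic API passes sizes as page orders: bytes = PAGE_SIZE << order, so with 4KB pages an OMAP 1MB section is order 8 and a 64KB large page is order 4. A hypothetical caller (prot left 0 for brevity):

static int demo_map_section(struct iommu_domain *domain, unsigned long da,
			    phys_addr_t pa)
{
	int order = get_order(SZ_1M);	/* 8 when PAGE_SIZE is 4KB */

	/* bytes_to_iopgsz(SZ_1M) selects MMU_CAM_PGSZ_1M inside the driver */
	return iommu_map(domain, da, pa, order, 0);
}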
+
+static int
+omap_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
+{
+	struct omap_iommu_domain *omap_domain = domain->priv;
+	struct iommu *oiommu;
+	int ret = 0;
+
+	spin_lock(&omap_domain->lock);
+
+	/* only a single device is supported per domain for now */
+	if (omap_domain->iommu_dev) {
+		dev_err(dev, "iommu domain is already attached\n");
+		ret = -EBUSY;
+		goto out;
+	}
+
+	/* get a handle to and enable the omap iommu */
+	oiommu = omap_iommu_attach(dev, omap_domain->pgtable);
+	if (IS_ERR(oiommu)) {
+		ret = PTR_ERR(oiommu);
+		dev_err(dev, "can't get omap iommu: %d\n", ret);
+		goto out;
+	}
+
+	omap_domain->iommu_dev = oiommu;
+
+out:
+	spin_unlock(&omap_domain->lock);
+	return ret;
+}
+
+static void omap_iommu_detach_dev(struct iommu_domain *domain,
+				  struct device *dev)
+{
+	struct omap_iommu_domain *omap_domain = domain->priv;
+	struct iommu *oiommu = to_iommu(dev);
+
+	spin_lock(&omap_domain->lock);
+
+	/* only a single device is supported per domain for now */
+	if (omap_domain->iommu_dev != oiommu) {
+		dev_err(dev, "invalid iommu device\n");
+		goto out;
+	}
+
+	iopgtable_clear_entry_all(oiommu);
+
+	omap_iommu_detach(oiommu);
+
+	omap_domain->iommu_dev = NULL;
+
+out:
+	spin_unlock(&omap_domain->lock);
+}
+
+static int omap_iommu_domain_init(struct iommu_domain *domain)
+{
+	struct omap_iommu_domain *omap_domain;
+
+	omap_domain = kzalloc(sizeof(*omap_domain), GFP_KERNEL);
+	if (!omap_domain) {
+		pr_err("kzalloc failed\n");
+		goto out;
+	}
+
+	omap_domain->pgtable = kzalloc(IOPGD_TABLE_SIZE, GFP_KERNEL);
+	if (!omap_domain->pgtable) {
+		pr_err("kzalloc failed\n");
+		goto fail_nomem;
+	}
+
+	/*
+	 * should never fail, but please keep this around to ensure
+	 * we keep the hardware happy
+	 */
+	BUG_ON(!IS_ALIGNED((long)omap_domain->pgtable, IOPGD_TABLE_SIZE));
+
+	clean_dcache_area(omap_domain->pgtable, IOPGD_TABLE_SIZE);
+	spin_lock_init(&omap_domain->lock);
+
+	domain->priv = omap_domain;
+
+	return 0;
+
+fail_nomem:
+	kfree(omap_domain);
+out:
+	return -ENOMEM;
+}
+
+/* assume device was already detached */
+static void omap_iommu_domain_destroy(struct iommu_domain *domain)
+{
+	struct omap_iommu_domain *omap_domain = domain->priv;
+
+	domain->priv = NULL;
+
+	kfree(omap_domain->pgtable);
+	kfree(omap_domain);
+}
+
+static phys_addr_t omap_iommu_iova_to_phys(struct iommu_domain *domain,
+					   unsigned long da)
+{
+	struct omap_iommu_domain *omap_domain = domain->priv;
+	struct iommu *oiommu = omap_domain->iommu_dev;
+	struct device *dev = oiommu->dev;
+	u32 *pgd, *pte;
+	phys_addr_t ret = 0;
+
+	iopgtable_lookup_entry(oiommu, da, &pgd, &pte);
+
+	if (pte) {
+		if (iopte_is_small(*pte))
+			ret = omap_iommu_translate(*pte, da, IOPTE_MASK);
+		else if (iopte_is_large(*pte))
+			ret = omap_iommu_translate(*pte, da, IOLARGE_MASK);
+		else
+			dev_err(dev, "bogus pte 0x%x", *pte);
+	} else {
+		if (iopgd_is_section(*pgd))
+			ret = omap_iommu_translate(*pgd, da, IOSECTION_MASK);
+		else if (iopgd_is_super(*pgd))
+			ret = omap_iommu_translate(*pgd, da, IOSUPER_MASK);
+		else
+			dev_err(dev, "bogus pgd 0x%x", *pgd);
+	}
+
+	return ret;
+}
+
+static int omap_iommu_domain_has_cap(struct iommu_domain *domain,
+				     unsigned long cap)
+{
+	return 0;
+}
+
+static struct iommu_ops omap_iommu_ops = {
+	.domain_init	= omap_iommu_domain_init,
+	.domain_destroy	= omap_iommu_domain_destroy,
+	.attach_dev	= omap_iommu_attach_dev,
+	.detach_dev	= omap_iommu_detach_dev,
+	.map		= omap_iommu_map,
+	.unmap		= omap_iommu_unmap,
+	.iova_to_phys	= omap_iommu_iova_to_phys,
+	.domain_has_cap	= omap_iommu_domain_has_cap,
+};
+
+static int __init omap_iommu_init(void)
+{
+	struct kmem_cache *p;
+	const unsigned long flags = SLAB_HWCACHE_ALIGN;
+	size_t align = 1 << 10; /* L2 pagetable alignment */
+
+	p = kmem_cache_create("iopte_cache", IOPTE_TABLE_SIZE, align, flags,
+			      iopte_cachep_ctor);
+	if (!p)
+		return -ENOMEM;
+	iopte_cachep = p;
+
+	register_iommu(&omap_iommu_ops);
+
+	return platform_driver_register(&omap_iommu_driver);
+}
+module_init(omap_iommu_init);
+
+static void __exit omap_iommu_exit(void)
+{
+	kmem_cache_destroy(iopte_cachep);
+
+	platform_driver_unregister(&omap_iommu_driver);
+}
+module_exit(omap_iommu_exit);
+
+MODULE_DESCRIPTION("omap iommu: tlb and pagetable primitives");
+MODULE_ALIAS("platform:omap-iommu");
+MODULE_AUTHOR("Hiroshi DOYU, Paul Mundt and Toshihiro Kobayashi");
+MODULE_LICENSE("GPL v2");
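Once a device is attached, translations can be queried back through the generic API, which lands in omap_iommu_iova_to_phys() above. A short sketch with a made-up device address:

static void demo_translate(struct iommu_domain *domain)
{
	phys_addr_t pa = iommu_iova_to_phys(domain, 0x10000000);

	pr_info("da 0x10000000 -> pa 0x%08lx\n", (unsigned long)pa);
}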
diff --git a/drivers/iommu/omap-iovmm.c b/drivers/iommu/omap-iovmm.c
new file mode 100644
index 000000000000..809ca124196e
--- /dev/null
+++ b/drivers/iommu/omap-iovmm.c
@@ -0,0 +1,923 @@
+/*
+ * omap iommu: simple virtual address space management
+ *
+ * Copyright (C) 2008-2009 Nokia Corporation
+ *
+ * Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/device.h>
+#include <linux/scatterlist.h>
+#include <linux/iommu.h>
+
+#include <asm/cacheflush.h>
+#include <asm/mach/map.h>
+
+#include <plat/iommu.h>
+#include <plat/iovmm.h>
+
+#include <plat/iopgtable.h>
+
+/*
+ * A device driver needs to create address mappings between:
+ *
+ * - iommu/device address
+ * - physical address
+ * - mpu virtual address
+ *
+ * There are 4 possible patterns for them:
+ *
+ *    |iova/			mapping		iommu_		page
+ *    | da	pa	va	(d)-(p)-(v)	function	type
+ *  ---------------------------------------------------------------------------
+ *  1 | c	c	c	 1 - 1 - 1	_kmap() / _kunmap()	s
+ *  2 | c	c,a	c	 1 - 1 - 1	_kmalloc()/ _kfree()	s
+ *  3 | c	d	c	 1 - n - 1	_vmap() / _vunmap()	s
+ *  4 | c	d,a	c	 1 - n - 1	_vmalloc()/ _vfree()	n*
+ *
+ *
+ *	'iova':	device iommu virtual address
+ *	'da':	alias of 'iova'
+ *	'pa':	physical address
+ *	'va':	mpu virtual address
+ *
+ *	'c':	contiguous memory area
+ *	'd':	discontiguous memory area
+ *	'a':	anonymous memory allocation
+ *	'()':	optional feature
+ *
+ *	'n':	a normal page(4KB) size is used.
+ *	's':	multiple iommu superpage(16MB, 1MB, 64KB, 4KB) size is used.
+ *
+ *	'*':	not yet, but feasible.
+ */
+
+static struct kmem_cache *iovm_area_cachep;
+
+/* return total bytes of sg buffers */
+static size_t sgtable_len(const struct sg_table *sgt)
+{
+	unsigned int i, total = 0;
+	struct scatterlist *sg;
+
+	if (!sgt)
+		return 0;
+
+	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
+		size_t bytes;
+
+		bytes = sg->length;
+
+		if (!iopgsz_ok(bytes)) {
+			pr_err("%s: sg[%d] not iommu pagesize(%x)\n",
+			       __func__, i, bytes);
+			return 0;
+		}
+
+		total += bytes;
+	}
+
+	return total;
+}
+#define sgtable_ok(x)	(!!sgtable_len(x))
+
+static unsigned max_alignment(u32 addr)
+{
+	int i;
+	unsigned pagesize[] = { SZ_16M, SZ_1M, SZ_64K, SZ_4K, };
+	for (i = 0; i < ARRAY_SIZE(pagesize) && addr & (pagesize[i] - 1); i++)
+		;
+	return (i < ARRAY_SIZE(pagesize)) ? pagesize[i] : 0;
+}
+
+/*
+ * calculate the optimal number of sg elements from total bytes based on
+ * iommu superpages
+ */
+static unsigned sgtable_nents(size_t bytes, u32 da, u32 pa)
+{
+	unsigned nr_entries = 0, ent_sz;
+
+	if (!IS_ALIGNED(bytes, PAGE_SIZE)) {
+		pr_err("%s: wrong size %08x\n", __func__, bytes);
+		return 0;
+	}
+
+	while (bytes) {
+		ent_sz = max_alignment(da | pa);
+		ent_sz = min_t(unsigned, ent_sz, iopgsz_max(bytes));
+		nr_entries++;
+
+		da += ent_sz;
+		pa += ent_sz;
+		bytes -= ent_sz;
+	}
+
+	return nr_entries;
+}
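Worked example for max_alignment()/sgtable_nents(): a 17MB linear region with da == pa == 0x80000000 collapses to two sg entries (one 16MB plus one 1MB) instead of 4352 4KB entries. A standalone recomputation, with the page-size constants inlined and iopgsz_max() reimplemented locally as a stand-in:

#include <stdio.h>

static unsigned int max_alignment(unsigned int addr)
{
	const unsigned int pagesize[] = { 0x1000000, 0x100000, 0x10000, 0x1000 };
	unsigned int i;

	for (i = 0; i < 4 && (addr & (pagesize[i] - 1)); i++)
		;
	return i < 4 ? pagesize[i] : 0;
}

static unsigned int iopgsz_max(unsigned int bytes)	/* local stand-in */
{
	if (bytes >= 0x1000000)
		return 0x1000000;
	if (bytes >= 0x100000)
		return 0x100000;
	if (bytes >= 0x10000)
		return 0x10000;
	return 0x1000;
}

int main(void)
{
	unsigned int da = 0x80000000u, bytes = 17 << 20, nents = 0;

	while (bytes) {
		unsigned int ent = max_alignment(da);

		if (ent > iopgsz_max(bytes))
			ent = iopgsz_max(bytes);
		nents++;
		da += ent;
		bytes -= ent;
	}
	printf("%u entries\n", nents);	/* 2: one 16MB + one 1MB */
	return 0;
}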
+
+/* allocate and initialize sg_table header(a kind of 'superblock') */
+static struct sg_table *sgtable_alloc(const size_t bytes, u32 flags,
+				      u32 da, u32 pa)
+{
+	unsigned int nr_entries;
+	int err;
+	struct sg_table *sgt;
+
+	if (!bytes)
+		return ERR_PTR(-EINVAL);
+
+	if (!IS_ALIGNED(bytes, PAGE_SIZE))
+		return ERR_PTR(-EINVAL);
+
+	if (flags & IOVMF_LINEAR) {
+		nr_entries = sgtable_nents(bytes, da, pa);
+		if (!nr_entries)
+			return ERR_PTR(-EINVAL);
+	} else
+		nr_entries = bytes / PAGE_SIZE;
+
+	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
+	if (!sgt)
+		return ERR_PTR(-ENOMEM);
+
+	err = sg_alloc_table(sgt, nr_entries, GFP_KERNEL);
+	if (err) {
+		kfree(sgt);
+		return ERR_PTR(err);
+	}
+
+	pr_debug("%s: sgt:%p(%d entries)\n", __func__, sgt, nr_entries);
+
+	return sgt;
+}
+
+/* free sg_table header(a kind of superblock) */
+static void sgtable_free(struct sg_table *sgt)
+{
+	if (!sgt)
+		return;
+
+	sg_free_table(sgt);
+	kfree(sgt);
+
+	pr_debug("%s: sgt:%p\n", __func__, sgt);
+}
+
+/* map 'sglist' to a contiguous mpu virtual area and return 'va' */
+static void *vmap_sg(const struct sg_table *sgt)
+{
+	u32 va;
+	size_t total;
+	unsigned int i;
+	struct scatterlist *sg;
+	struct vm_struct *new;
+	const struct mem_type *mtype;
+
+	mtype = get_mem_type(MT_DEVICE);
+	if (!mtype)
+		return ERR_PTR(-EINVAL);
+
+	total = sgtable_len(sgt);
+	if (!total)
+		return ERR_PTR(-EINVAL);
+
+	new = __get_vm_area(total, VM_IOREMAP, VMALLOC_START, VMALLOC_END);
+	if (!new)
+		return ERR_PTR(-ENOMEM);
+	va = (u32)new->addr;
+
+	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
+		size_t bytes;
+		u32 pa;
+		int err;
+
+		pa = sg_phys(sg);
+		bytes = sg->length;
+
+		BUG_ON(bytes != PAGE_SIZE);
+
+		err = ioremap_page(va, pa, mtype);
+		if (err)
+			goto err_out;
+
+		va += bytes;
+	}
+
+	flush_cache_vmap((unsigned long)new->addr,
+			 (unsigned long)(new->addr + total));
+	return new->addr;
+
+err_out:
+	WARN_ON(1); /* FIXME: cleanup some mpu mappings */
+	vunmap(new->addr);
+	return ERR_PTR(-EAGAIN);
+}
+
+static inline void vunmap_sg(const void *va)
+{
+	vunmap(va);
+}
+
+static struct iovm_struct *__find_iovm_area(struct iommu *obj, const u32 da)
+{
+	struct iovm_struct *tmp;
+
+	list_for_each_entry(tmp, &obj->mmap, list) {
+		if ((da >= tmp->da_start) && (da < tmp->da_end)) {
+			size_t len;
+
+			len = tmp->da_end - tmp->da_start;
+
+			dev_dbg(obj->dev, "%s: %08x-%08x-%08x(%x) %08x\n",
+				__func__, tmp->da_start, da, tmp->da_end, len,
+				tmp->flags);
+
+			return tmp;
+		}
+	}
+
+	return NULL;
+}
+
+/**
+ * find_iovm_area  -  find iovma which includes @da
+ * @da:		iommu device virtual address
+ *
+ * Find the existing iovma starting at @da
+ */
+struct iovm_struct *find_iovm_area(struct iommu *obj, u32 da)
+{
+	struct iovm_struct *area;
+
+	mutex_lock(&obj->mmap_lock);
+	area = __find_iovm_area(obj, da);
+	mutex_unlock(&obj->mmap_lock);
+
+	return area;
+}
+EXPORT_SYMBOL_GPL(find_iovm_area);
+
+/*
+ * This finds the hole(area) which fits the requested address and len
+ * in iovmas mmap, and returns the newly allocated iovma.
+ */
+static struct iovm_struct *alloc_iovm_area(struct iommu *obj, u32 da,
+					   size_t bytes, u32 flags)
+{
+	struct iovm_struct *new, *tmp;
+	u32 start, prev_end, alignment;
+
+	if (!obj || !bytes)
+		return ERR_PTR(-EINVAL);
+
+	start = da;
+	alignment = PAGE_SIZE;
+
+	if (~flags & IOVMF_DA_FIXED) {
+		/* Don't map address 0 */
+		start = obj->da_start ? obj->da_start : alignment;
+
+		if (flags & IOVMF_LINEAR)
+			alignment = iopgsz_max(bytes);
+		start = roundup(start, alignment);
+	} else if (start < obj->da_start || start > obj->da_end ||
+					obj->da_end - start < bytes) {
+		return ERR_PTR(-EINVAL);
+	}
+
+	tmp = NULL;
+	if (list_empty(&obj->mmap))
+		goto found;
+
+	prev_end = 0;
+	list_for_each_entry(tmp, &obj->mmap, list) {
+
+		if (prev_end > start)
+			break;
+
+		if (tmp->da_start > start && (tmp->da_start - start) >= bytes)
+			goto found;
+
+		if (tmp->da_end >= start && ~flags & IOVMF_DA_FIXED)
+			start = roundup(tmp->da_end + 1, alignment);
+
+		prev_end = tmp->da_end;
+	}
+
+	if ((start >= prev_end) && (obj->da_end - start >= bytes))
+		goto found;
+
+	dev_dbg(obj->dev, "%s: no space to fit %08x(%x) flags: %08x\n",
+		__func__, da, bytes, flags);
+
+	return ERR_PTR(-EINVAL);
+
+found:
+	new = kmem_cache_zalloc(iovm_area_cachep, GFP_KERNEL);
+	if (!new)
+		return ERR_PTR(-ENOMEM);
+
+	new->iommu = obj;
+	new->da_start = start;
+	new->da_end = start + bytes;
+	new->flags = flags;
+
+	/*
+	 * keep ascending order of iovmas
+	 */
+	if (tmp)
+		list_add_tail(&new->list, &tmp->list);
+	else
+		list_add(&new->list, &obj->mmap);
+
+	dev_dbg(obj->dev, "%s: found %08x-%08x-%08x(%x) %08x\n",
+		__func__, new->da_start, start, new->da_end, bytes, flags);
+
+	return new;
+}
+
+static void free_iovm_area(struct iommu *obj, struct iovm_struct *area)
+{
+	size_t bytes;
+
+	BUG_ON(!obj || !area);
+
+	bytes = area->da_end - area->da_start;
+
+	dev_dbg(obj->dev, "%s: %08x-%08x(%x) %08x\n",
+		__func__, area->da_start, area->da_end, bytes, area->flags);
+
+	list_del(&area->list);
+	kmem_cache_free(iovm_area_cachep, area);
+}
+
+/**
+ * da_to_va - convert (d) to (v)
+ * @obj:	objective iommu
+ * @da:		iommu device virtual address
+ * @va:		mpu virtual address
+ *
+ * Returns mpu virtual addr which corresponds to a given device virtual addr
+ */
+void *da_to_va(struct iommu *obj, u32 da)
+{
+	void *va = NULL;
+	struct iovm_struct *area;
+
+	mutex_lock(&obj->mmap_lock);
+
+	area = __find_iovm_area(obj, da);
+	if (!area) {
+		dev_dbg(obj->dev, "%s: no da area(%08x)\n", __func__, da);
+		goto out;
+	}
+	va = area->va;
+out:
+	mutex_unlock(&obj->mmap_lock);
+
+	return va;
+}
+EXPORT_SYMBOL_GPL(da_to_va);
+
+static void sgtable_fill_vmalloc(struct sg_table *sgt, void *_va)
+{
+	unsigned int i;
+	struct scatterlist *sg;
+	void *va = _va;
+	void *va_end;
+
+	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
+		struct page *pg;
+		const size_t bytes = PAGE_SIZE;
+
+		/*
+		 * iommu 'superpage' isn't supported with 'iommu_vmalloc()'
+		 */
+		pg = vmalloc_to_page(va);
+		BUG_ON(!pg);
+		sg_set_page(sg, pg, bytes, 0);
+
+		va += bytes;
+	}
+
+	va_end = _va + PAGE_SIZE * i;
+}
+
+static inline void sgtable_drain_vmalloc(struct sg_table *sgt)
+{
+	/*
+	 * Actually this is not necessary at all, just exists for
+	 * consistency of the code readability.
+	 */
+	BUG_ON(!sgt);
+}
+
+static void sgtable_fill_kmalloc(struct sg_table *sgt, u32 pa, u32 da,
+				 size_t len)
+{
+	unsigned int i;
+	struct scatterlist *sg;
+
+	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
+		unsigned bytes;
+
+		bytes = max_alignment(da | pa);
+		bytes = min_t(unsigned, bytes, iopgsz_max(len));
+
+		BUG_ON(!iopgsz_ok(bytes));
+
+		sg_set_buf(sg, phys_to_virt(pa), bytes);
+		/*
+		 * 'pa' is contiguous (linear).
+		 */
+		pa += bytes;
+		da += bytes;
+		len -= bytes;
+	}
+	BUG_ON(len);
+}
+
+static inline void sgtable_drain_kmalloc(struct sg_table *sgt)
+{
+	/*
+	 * Actually this is not necessary at all, just exists for
+	 * consistency of the code readability
+	 */
+	BUG_ON(!sgt);
+}
+
+/* create 'da' <-> 'pa' mapping from 'sgt' */
+static int map_iovm_area(struct iommu_domain *domain, struct iovm_struct *new,
+			 const struct sg_table *sgt, u32 flags)
+{
+	int err;
+	unsigned int i, j;
+	struct scatterlist *sg;
+	u32 da = new->da_start;
+	int order;
+
+	if (!domain || !sgt)
+		return -EINVAL;
+
+	BUG_ON(!sgtable_ok(sgt));
+
+	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
+		u32 pa;
+		size_t bytes;
+
+		pa = sg_phys(sg);
+		bytes = sg->length;
+
+		flags &= ~IOVMF_PGSZ_MASK;
+
+		if (bytes_to_iopgsz(bytes) < 0)
+			goto err_out;
+
+		order = get_order(bytes);
+
+		pr_debug("%s: [%d] %08x %08x(%x)\n", __func__,
+			 i, da, pa, bytes);
+
+		err = iommu_map(domain, da, pa, order, flags);
+		if (err)
+			goto err_out;
+
+		da += bytes;
+	}
+	return 0;
+
+err_out:
+	da = new->da_start;
+
+	for_each_sg(sgt->sgl, sg, i, j) {
+		size_t bytes;
+
+		bytes = sg->length;
+		order = get_order(bytes);
+
+		/* ignore failures.. we're already handling one */
+		iommu_unmap(domain, da, order);
+
+		da += bytes;
+	}
+	return err;
+}
+
+/* release 'da' <-> 'pa' mapping */
+static void unmap_iovm_area(struct iommu_domain *domain, struct iommu *obj,
+			    struct iovm_struct *area)
+{
+	u32 start;
+	size_t total = area->da_end - area->da_start;
+	const struct sg_table *sgt = area->sgt;
+	struct scatterlist *sg;
+	int i, err;
+
+	BUG_ON(!sgtable_ok(sgt));
+	BUG_ON((!total) || !IS_ALIGNED(total, PAGE_SIZE));
+
+	start = area->da_start;
+	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
+		size_t bytes;
+		int order;
+
+		bytes = sg->length;
+		order = get_order(bytes);
+
+		err = iommu_unmap(domain, start, order);
+		if (err)
+			break;
+
+		dev_dbg(obj->dev, "%s: unmap %08x(%x) %08x\n",
+			__func__, start, bytes, area->flags);
+
+		BUG_ON(!IS_ALIGNED(bytes, PAGE_SIZE));
+
+		total -= bytes;
+		start += bytes;
+	}
+	BUG_ON(total);
+}
+
+/* template function for all unmapping */
+static struct sg_table *unmap_vm_area(struct iommu_domain *domain,
+				      struct iommu *obj, const u32 da,
+				      void (*fn)(const void *), u32 flags)
+{
+	struct sg_table *sgt = NULL;
+	struct iovm_struct *area;
+
+	if (!IS_ALIGNED(da, PAGE_SIZE)) {
+		dev_err(obj->dev, "%s: alignment err(%08x)\n", __func__, da);
+		return NULL;
+	}
+
+	mutex_lock(&obj->mmap_lock);
+
+	area = __find_iovm_area(obj, da);
+	if (!area) {
+		dev_dbg(obj->dev, "%s: no da area(%08x)\n", __func__, da);
+		goto out;
+	}
+
+	if ((area->flags & flags) != flags) {
+		dev_err(obj->dev, "%s: wrong flags(%08x)\n", __func__,
+			area->flags);
+		goto out;
+	}
+	sgt = (struct sg_table *)area->sgt;
+
+	unmap_iovm_area(domain, obj, area);
+
+	fn(area->va);
+
+	dev_dbg(obj->dev, "%s: %08x-%08x-%08x(%x) %08x\n", __func__,
+		area->da_start, da, area->da_end,
+		area->da_end - area->da_start, area->flags);
+
+	free_iovm_area(obj, area);
+out:
+	mutex_unlock(&obj->mmap_lock);
+
+	return sgt;
+}
+
+static u32 map_iommu_region(struct iommu_domain *domain, struct iommu *obj,
+			    u32 da, const struct sg_table *sgt, void *va,
+			    size_t bytes, u32 flags)
+{
+	int err = -ENOMEM;
+	struct iovm_struct *new;
+
+	mutex_lock(&obj->mmap_lock);
+
+	new = alloc_iovm_area(obj, da, bytes, flags);
+	if (IS_ERR(new)) {
+		err = PTR_ERR(new);
+		goto err_alloc_iovma;
+	}
+	new->va = va;
+	new->sgt = sgt;
+
+	if (map_iovm_area(domain, new, sgt, new->flags))
+		goto err_map;
+
+	mutex_unlock(&obj->mmap_lock);
+
+	dev_dbg(obj->dev, "%s: da:%08x(%x) flags:%08x va:%p\n",
+		__func__, new->da_start, bytes, new->flags, va);
+
+	return new->da_start;
+
+err_map:
+	free_iovm_area(obj, new);
+err_alloc_iovma:
+	mutex_unlock(&obj->mmap_lock);
+	return err;
+}
+
+static inline u32 __iommu_vmap(struct iommu_domain *domain, struct iommu *obj,
+			       u32 da, const struct sg_table *sgt,
+			       void *va, size_t bytes, u32 flags)
+{
+	return map_iommu_region(domain, obj, da, sgt, va, bytes, flags);
+}
+
+/**
+ * iommu_vmap  -  (d)-(p)-(v) address mapper
+ * @obj:	objective iommu
+ * @sgt:	address of scatter gather table
+ * @flags:	iovma and page property
+ *
+ * Creates 1-n-1 mapping with given @sgt and returns @da.
+ * All @sgt element must be io page size aligned.
+ */
+u32 iommu_vmap(struct iommu_domain *domain, struct iommu *obj, u32 da,
+	       const struct sg_table *sgt, u32 flags)
+{
+	size_t bytes;
+	void *va = NULL;
+
+	if (!obj || !obj->dev || !sgt)
+		return -EINVAL;
+
+	bytes = sgtable_len(sgt);
+	if (!bytes)
+		return -EINVAL;
+	bytes = PAGE_ALIGN(bytes);
+
+	if (flags & IOVMF_MMIO) {
+		va = vmap_sg(sgt);
+		if (IS_ERR(va))
+			return PTR_ERR(va);
+	}
+
+	flags |= IOVMF_DISCONT;
+	flags |= IOVMF_MMIO;
+
+	da = __iommu_vmap(domain, obj, da, sgt, va, bytes, flags);
+	if (IS_ERR_VALUE(da))
+		vunmap_sg(va);
+
+	return da;
+}
+EXPORT_SYMBOL_GPL(iommu_vmap);
+
+/**
+ * iommu_vunmap  -  release virtual mapping obtained by 'iommu_vmap()'
+ * @obj:	objective iommu
+ * @da:		iommu device virtual address
+ *
+ * Free the iommu virtually contiguous memory area starting at
+ * @da, which was returned by 'iommu_vmap()'.
+ */
+struct sg_table *
+iommu_vunmap(struct iommu_domain *domain, struct iommu *obj, u32 da)
+{
+	struct sg_table *sgt;
+	/*
+	 * 'sgt' is allocated before 'iommu_vmalloc()' is called.
+	 * Just returns 'sgt' to the caller to free
+	 */
+	sgt = unmap_vm_area(domain, obj, da, vunmap_sg,
+			    IOVMF_DISCONT | IOVMF_MMIO);
+	if (!sgt)
+		dev_dbg(obj->dev, "%s: No sgt\n", __func__);
+	return sgt;
+}
+EXPORT_SYMBOL_GPL(iommu_vunmap);
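Driver-side usage sketch for the pair above. The flags argument may be 0 since iommu_vmap() ORs in IOVMF_DISCONT | IOVMF_MMIO itself, and da = 0 without IOVMF_DA_FIXED lets iovmm choose the device address:

static u32 demo_vmap(struct iommu_domain *domain, struct iommu *obj,
		     const struct sg_table *sgt)
{
	u32 da = iommu_vmap(domain, obj, 0, sgt, 0);

	if (IS_ERR_VALUE(da))
		return da;

	/* ... hand 'da' to the device ... */

	iommu_vunmap(domain, obj, da);
	return 0;
}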
+
+/**
+ * iommu_vmalloc  -  (d)-(p)-(v) address allocator and mapper
+ * @obj:	objective iommu
+ * @da:		contiguous iommu virtual memory
+ * @bytes:	allocation size
+ * @flags:	iovma and page property
+ *
+ * Allocate @bytes linearly and creates 1-n-1 mapping and returns
+ * @da again, which might be adjusted if 'IOVMF_DA_FIXED' is not set.
+ */
+u32 iommu_vmalloc(struct iommu_domain *domain, struct iommu *obj, u32 da,
+		  size_t bytes, u32 flags)
+{
+	void *va;
+	struct sg_table *sgt;
+
+	if (!obj || !obj->dev || !bytes)
+		return -EINVAL;
+
+	bytes = PAGE_ALIGN(bytes);
+
+	va = vmalloc(bytes);
+	if (!va)
+		return -ENOMEM;
+
+	flags |= IOVMF_DISCONT;
+	flags |= IOVMF_ALLOC;
+
+	sgt = sgtable_alloc(bytes, flags, da, 0);
+	if (IS_ERR(sgt)) {
+		da = PTR_ERR(sgt);
+		goto err_sgt_alloc;
+	}
+	sgtable_fill_vmalloc(sgt, va);
+
+	da = __iommu_vmap(domain, obj, da, sgt, va, bytes, flags);
+	if (IS_ERR_VALUE(da))
+		goto err_iommu_vmap;
+
+	return da;
+
+err_iommu_vmap:
+	sgtable_drain_vmalloc(sgt);
+	sgtable_free(sgt);
+err_sgt_alloc:
+	vfree(va);
+	return da;
+}
+EXPORT_SYMBOL_GPL(iommu_vmalloc);
+
+/**
+ * iommu_vfree  -  release memory allocated by 'iommu_vmalloc()'
+ * @obj:	objective iommu
+ * @da:		iommu device virtual address
+ *
+ * Frees the iommu virtually contiguous memory area starting at
+ * @da, as obtained from 'iommu_vmalloc()'.
+ */
+void iommu_vfree(struct iommu_domain *domain, struct iommu *obj, const u32 da)
+{
+	struct sg_table *sgt;
+
+	sgt = unmap_vm_area(domain, obj, da, vfree,
+			    IOVMF_DISCONT | IOVMF_ALLOC);
+	if (!sgt)
+		dev_dbg(obj->dev, "%s: No sgt\n", __func__);
+	sgtable_free(sgt);
+}
+EXPORT_SYMBOL_GPL(iommu_vfree);
+
+static u32 __iommu_kmap(struct iommu_domain *domain, struct iommu *obj,
+			u32 da, u32 pa, void *va, size_t bytes, u32 flags)
+{
+	struct sg_table *sgt;
+
+	sgt = sgtable_alloc(bytes, flags, da, pa);
+	if (IS_ERR(sgt))
+		return PTR_ERR(sgt);
+
+	sgtable_fill_kmalloc(sgt, pa, da, bytes);
+
+	da = map_iommu_region(domain, obj, da, sgt, va, bytes, flags);
+	if (IS_ERR_VALUE(da)) {
+		sgtable_drain_kmalloc(sgt);
+		sgtable_free(sgt);
+	}
+
+	return da;
+}
+
+/**
+ * iommu_kmap  -  (d)-(p)-(v) address mapper
+ * @obj:	objective iommu
+ * @da:		contiguous iommu virtual memory
+ * @pa:		contiguous physical memory
+ * @flags:	iovma and page property
+ *
+ * Creates 1-1-1 mapping and returns @da again, which can be
+ * adjusted if 'IOVMF_DA_FIXED' is not set.
+ */
+u32 iommu_kmap(struct iommu_domain *domain, struct iommu *obj, u32 da, u32 pa,
+	       size_t bytes, u32 flags)
+{
+	void *va;
+
+	if (!obj || !obj->dev || !bytes)
+		return -EINVAL;
+
+	bytes = PAGE_ALIGN(bytes);
+
+	va = ioremap(pa, bytes);
+	if (!va)
+		return -ENOMEM;
+
+	flags |= IOVMF_LINEAR;
+	flags |= IOVMF_MMIO;
+
+	da = __iommu_kmap(domain, obj, da, pa, va, bytes, flags);
+	if (IS_ERR_VALUE(da))
+		iounmap(va);
+
+	return da;
+}
+EXPORT_SYMBOL_GPL(iommu_kmap);
+
+/**
+ * iommu_kunmap  -  release virtual mapping obtained by 'iommu_kmap()'
+ * @obj:	objective iommu
+ * @da:		iommu device virtual address
+ *
+ * Frees the iommu virtually contiguous memory area starting at
+ * @da, which was passed to and was returned by 'iommu_kmap()'.
+ */
+void iommu_kunmap(struct iommu_domain *domain, struct iommu *obj, u32 da)
+{
+	struct sg_table *sgt;
+	typedef void (*func_t)(const void *);
+
+	sgt = unmap_vm_area(domain, obj, da, (func_t)iounmap,
+			    IOVMF_LINEAR | IOVMF_MMIO);
+	if (!sgt)
+		dev_dbg(obj->dev, "%s: No sgt\n", __func__);
+	sgtable_free(sgt);
+}
+EXPORT_SYMBOL_GPL(iommu_kunmap);
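Usage sketch for iommu_kmap()/iommu_kunmap(), mapping a physically contiguous region at a caller-chosen da via IOVMF_DA_FIXED; both addresses here are made up:

static int demo_kmap(struct iommu_domain *domain, struct iommu *obj)
{
	u32 da = iommu_kmap(domain, obj, 0x20000000, 0x40200000, SZ_64K,
			    IOVMF_DA_FIXED);

	if (IS_ERR_VALUE(da))
		return (int)da;

	/* ... */

	iommu_kunmap(domain, obj, da);
	return 0;
}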
+
+/**
+ * iommu_kmalloc  -  (d)-(p)-(v) address allocator and mapper
+ * @obj:	objective iommu
+ * @da:		contiguous iommu virtual memory
+ * @bytes:	bytes for allocation
+ * @flags:	iovma and page property
+ *
+ * Allocate @bytes linearly and creates 1-1-1 mapping and returns
+ * @da again, which might be adjusted if 'IOVMF_DA_FIXED' is not set.
+ */
+u32 iommu_kmalloc(struct iommu_domain *domain, struct iommu *obj, u32 da,
+		  size_t bytes, u32 flags)
+{
+	void *va;
+	u32 pa;
+
+	if (!obj || !obj->dev || !bytes)
+		return -EINVAL;
+
+	bytes = PAGE_ALIGN(bytes);
+
+	va = kmalloc(bytes, GFP_KERNEL | GFP_DMA);
+	if (!va)
+		return -ENOMEM;
+	pa = virt_to_phys(va);
+
+	flags |= IOVMF_LINEAR;
+	flags |= IOVMF_ALLOC;
+
+	da = __iommu_kmap(domain, obj, da, pa, va, bytes, flags);
+	if (IS_ERR_VALUE(da))
+		kfree(va);
+
+	return da;
+}
+EXPORT_SYMBOL_GPL(iommu_kmalloc);
+
+/**
+ * iommu_kfree  -  release virtual mapping obtained by 'iommu_kmalloc()'
+ * @obj:	objective iommu
+ * @da:		iommu device virtual address
+ *
+ * Frees the iommu virtually contiguous memory area starting at
+ * @da, which was passed to and was returned by 'iommu_kmalloc()'.
+ */
+void iommu_kfree(struct iommu_domain *domain, struct iommu *obj, u32 da)
+{
+	struct sg_table *sgt;
+
+	sgt = unmap_vm_area(domain, obj, da, kfree, IOVMF_LINEAR | IOVMF_ALLOC);
+	if (!sgt)
+		dev_dbg(obj->dev, "%s: No sgt\n", __func__);
+	sgtable_free(sgt);
+}
+EXPORT_SYMBOL_GPL(iommu_kfree);
+
+
+static int __init iovmm_init(void)
+{
+	const unsigned long flags = SLAB_HWCACHE_ALIGN;
+	struct kmem_cache *p;
+
+	p = kmem_cache_create("iovm_area_cache", sizeof(struct iovm_struct), 0,
+			      flags, NULL);
+	if (!p)
+		return -ENOMEM;
+	iovm_area_cachep = p;
+
+	return 0;
+}
+module_init(iovmm_init);
+
+static void __exit iovmm_exit(void)
+{
+	kmem_cache_destroy(iovm_area_cachep);
+}
+module_exit(iovmm_exit);
+
+MODULE_DESCRIPTION("omap iommu: simple virtual address space management");
+MODULE_AUTHOR("Hiroshi DOYU <Hiroshi.DOYU@nokia.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/video/Kconfig b/drivers/media/video/Kconfig
index f574dc012cad..6a25fad56655 100644
--- a/drivers/media/video/Kconfig
+++ b/drivers/media/video/Kconfig
@@ -763,7 +763,7 @@ source "drivers/media/video/m5mols/Kconfig"
 
 config VIDEO_OMAP3
 	tristate "OMAP 3 Camera support (EXPERIMENTAL)"
-	select OMAP_IOMMU
+	select OMAP_IOVMM
 	depends on VIDEO_V4L2 && I2C && VIDEO_V4L2_SUBDEV_API && ARCH_OMAP3 && EXPERIMENTAL
 	---help---
 	  Driver for an OMAP 3 camera controller.