Diffstat (limited to 'arch/sh/mm')
-rw-r--r--   arch/sh/mm/Kconfig   | 101
-rw-r--r--   arch/sh/mm/Makefile  |   5
-rw-r--r--   arch/sh/mm/fault.c   |  68
-rw-r--r--   arch/sh/mm/init.c    | 107
-rw-r--r--   arch/sh/mm/numa.c    |  92
-rw-r--r--   arch/sh/mm/pg-dma.c  |  95
-rw-r--r--   arch/sh/mm/pmb.c     |   2
7 files changed, 267 insertions, 203 deletions
diff --git a/arch/sh/mm/Kconfig b/arch/sh/mm/Kconfig
index 253346d7b316..70da1c8d407e 100644
--- a/arch/sh/mm/Kconfig
+++ b/arch/sh/mm/Kconfig
@@ -1,5 +1,3 @@
-menu "Processor selection"
-
 #
 # Processor families
 #
@@ -38,27 +36,31 @@ config CPU_SUBTYPE_ST40
 config CPU_SHX2
 	bool
 
+config CPU_SHX3
+	bool
+
+choice
+	prompt "Processor sub-type selection"
+
 #
 # Processor subtypes
 #
 
-comment "SH-2 Processor Support"
-
-config CPU_SUBTYPE_SH7604
-	bool "Support SH7604 processor"
-	select CPU_SH2
+# SH-2 Processor Support
 
 config CPU_SUBTYPE_SH7619
 	bool "Support SH7619 processor"
 	select CPU_SH2
+	select CPU_HAS_IPR_IRQ
 
-comment "SH-2A Processor Support"
+# SH-2A Processor Support
 
 config CPU_SUBTYPE_SH7206
 	bool "Support SH7206 processor"
	select CPU_SH2A
+	select CPU_HAS_IPR_IRQ
 
-comment "SH-3 Processor Support"
+# SH-3 Processor Support
 
 config CPU_SUBTYPE_SH7300
 	bool "Support SH7300 processor"
@@ -113,19 +115,19 @@ config CPU_SUBTYPE_SH7712
 	help
 	  Select SH7712 if you have a SH3-DSP SH7712 CPU.
 
-comment "SH-4 Processor Support"
+# SH-4 Processor Support
 
 config CPU_SUBTYPE_SH7750
 	bool "Support SH7750 processor"
 	select CPU_SH4
-	select CPU_HAS_IPR_IRQ
+	select CPU_HAS_INTC_IRQ
 	help
 	  Select SH7750 if you have a 200 Mhz SH-4 HD6417750 CPU.
 
 config CPU_SUBTYPE_SH7091
 	bool "Support SH7091 processor"
 	select CPU_SH4
-	select CPU_SUBTYPE_SH7750
+	select CPU_HAS_INTC_IRQ
 	help
 	  Select SH7091 if you have an SH-4 based Sega device (such as
 	  the Dreamcast, Naomi, and Naomi 2).
@@ -133,19 +135,17 @@ config CPU_SUBTYPE_SH7091
 config CPU_SUBTYPE_SH7750R
 	bool "Support SH7750R processor"
 	select CPU_SH4
-	select CPU_SUBTYPE_SH7750
-	select CPU_HAS_IPR_IRQ
+	select CPU_HAS_INTC_IRQ
 
 config CPU_SUBTYPE_SH7750S
 	bool "Support SH7750S processor"
 	select CPU_SH4
-	select CPU_SUBTYPE_SH7750
-	select CPU_HAS_IPR_IRQ
+	select CPU_HAS_INTC_IRQ
 
 config CPU_SUBTYPE_SH7751
 	bool "Support SH7751 processor"
 	select CPU_SH4
-	select CPU_HAS_IPR_IRQ
+	select CPU_HAS_INTC_IRQ
 	help
 	  Select SH7751 if you have a 166 Mhz SH-4 HD6417751 CPU, or if
 	  you have a HD6417751R CPU.
@@ -153,8 +153,7 @@ config CPU_SUBTYPE_SH7751
 config CPU_SUBTYPE_SH7751R
 	bool "Support SH7751R processor"
 	select CPU_SH4
-	select CPU_SUBTYPE_SH7751
-	select CPU_HAS_IPR_IRQ
+	select CPU_HAS_INTC_IRQ
 
 config CPU_SUBTYPE_SH7760
 	bool "Support SH7760 processor"
@@ -166,7 +165,7 @@ config CPU_SUBTYPE_SH4_202
 	bool "Support SH4-202 processor"
 	select CPU_SH4
 
-comment "ST40 Processor Support"
+# ST40 Processor Support
 
 config CPU_SUBTYPE_ST40STB1
 	bool "Support ST40STB1/ST40RA processors"
@@ -181,7 +180,7 @@ config CPU_SUBTYPE_ST40GX1
 	help
 	  Select ST40GX1 if you have a ST40GX1 CPU.
-comment "SH-4A Processor Support" +# SH-4A Processor Support config CPU_SUBTYPE_SH7770 bool "Support SH7770 processor" @@ -190,7 +189,7 @@ config CPU_SUBTYPE_SH7770 config CPU_SUBTYPE_SH7780 bool "Support SH7780 processor" select CPU_SH4A - select CPU_HAS_INTC2_IRQ + select CPU_HAS_INTC_IRQ config CPU_SUBTYPE_SH7785 bool "Support SH7785 processor" @@ -198,7 +197,13 @@ config CPU_SUBTYPE_SH7785 select CPU_SHX2 select CPU_HAS_INTC2_IRQ -comment "SH4AL-DSP Processor Support" +config CPU_SUBTYPE_SHX3 + bool "Support SH-X3 processor" + select CPU_SH4A + select CPU_SHX3 + select CPU_HAS_INTC2_IRQ + +# SH4AL-DSP Processor Support config CPU_SUBTYPE_SH73180 bool "Support SH73180 processor" @@ -212,9 +217,11 @@ config CPU_SUBTYPE_SH7722 bool "Support SH7722 processor" select CPU_SH4AL_DSP select CPU_SHX2 - select CPU_HAS_IPR_IRQ + select CPU_HAS_INTC_IRQ + select ARCH_SPARSEMEM_ENABLE + select SYS_SUPPORTS_NUMA -endmenu +endchoice menu "Memory management options" @@ -266,7 +273,7 @@ config MEMORY_SIZE config 32BIT bool "Support 32-bit physical addressing through PMB" - depends on CPU_SH4A && MMU && (!X2TLB || BROKEN) + depends on MMU && (CPU_SUBTYPE_SH7780 || CPU_SUBTYPE_SH7785) default y help If you say Y here, physical addressing will be extended to @@ -295,6 +302,17 @@ config VSYSCALL For systems with an MMU that can afford to give up a page, (the default value) say Y. +config NUMA + bool "Non Uniform Memory Access (NUMA) Support" + depends on MMU && SYS_SUPPORTS_NUMA && EXPERIMENTAL + default n + help + Some SH systems have many various memories scattered around + the address space, each with varying latencies. This enables + support for these blocks by binding them to nodes and allowing + memory policies to be used for prioritizing and controlling + allocation behaviour. + config NODES_SHIFT int default "1" @@ -302,14 +320,34 @@ config NODES_SHIFT config ARCH_FLATMEM_ENABLE def_bool y + depends on !NUMA + +config ARCH_SPARSEMEM_ENABLE + def_bool y + select SPARSEMEM_STATIC + +config ARCH_SPARSEMEM_DEFAULT + def_bool y config MAX_ACTIVE_REGIONS int + default "2" if (CPU_SUBTYPE_SH7722 && SPARSEMEM) default "1" config ARCH_POPULATES_NODE_MAP def_bool y +config ARCH_SELECT_MEMORY_MODEL + def_bool y + +config ARCH_ENABLE_MEMORY_HOTPLUG + def_bool y + depends on SPARSEMEM + +config ARCH_MEMORY_PROBE + def_bool y + depends on MEMORY_HOTPLUG + choice prompt "Kernel page size" default PAGE_SIZE_4KB @@ -394,15 +432,4 @@ config SH_WRITETHROUGH If unsure, say N. -config SH_OCRAM - bool "Operand Cache RAM (OCRAM) support" - help - Selecting this option will automatically tear down the number of - sets in the dcache by half, which in turn exposes a memory range. - - The addresses for the OC RAM base will vary according to the - processor version. Consult vendor documentation for specifics. - - If unsure, say N. 
diff --git a/arch/sh/mm/Makefile b/arch/sh/mm/Makefile
index 3ffd7f68c0a2..d677d7f3afc1 100644
--- a/arch/sh/mm/Makefile
+++ b/arch/sh/mm/Makefile
@@ -8,9 +8,6 @@ obj-$(CONFIG_CPU_SH2)	+= cache-sh2.o
 obj-$(CONFIG_CPU_SH3)	+= cache-sh3.o
 obj-$(CONFIG_CPU_SH4)	+= cache-sh4.o
 
-obj-$(CONFIG_DMA_PAGE_OPS)	+= pg-dma.o
-obj-$(CONFIG_HUGETLB_PAGE)	+= hugetlbpage.o
-
 mmu-y			:= fault-nommu.o tlb-nommu.o pg-nommu.o
 mmu-$(CONFIG_MMU)	:= fault.o clear_page.o copy_page.o tlb-flush.o \
 			   ioremap.o
@@ -27,5 +24,7 @@ obj-$(CONFIG_CPU_SH4)	+= tlb-sh4.o pg-sh4.o
 obj-$(CONFIG_SH7705_CACHE_32KB)	+= pg-sh7705.o
 endif
 
+obj-$(CONFIG_HUGETLB_PAGE)	+= hugetlbpage.o
 obj-$(CONFIG_SH7705_CACHE_32KB)	+= cache-sh7705.o
 obj-$(CONFIG_32BIT)		+= pmb.o
+obj-$(CONFIG_NUMA)		+= numa.o
diff --git a/arch/sh/mm/fault.c b/arch/sh/mm/fault.c
index c878faa4ae46..964c6767dc73 100644
--- a/arch/sh/mm/fault.c
+++ b/arch/sh/mm/fault.c
@@ -32,8 +32,8 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
 	struct task_struct *tsk;
 	struct mm_struct *mm;
 	struct vm_area_struct * vma;
-	unsigned long page;
 	int si_code;
+	int fault;
 	siginfo_t info;
 
 	trace_hardirqs_on();
@@ -125,20 +125,18 @@ good_area:
 	 * the fault.
 	 */
 survive:
-	switch (handle_mm_fault(mm, vma, address, writeaccess)) {
-		case VM_FAULT_MINOR:
-			tsk->min_flt++;
-			break;
-		case VM_FAULT_MAJOR:
-			tsk->maj_flt++;
-			break;
-		case VM_FAULT_SIGBUS:
-			goto do_sigbus;
-		case VM_FAULT_OOM:
+	fault = handle_mm_fault(mm, vma, address, writeaccess);
+	if (unlikely(fault & VM_FAULT_ERROR)) {
+		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
-		default:
-			BUG();
+		else if (fault & VM_FAULT_SIGBUS)
+			goto do_sigbus;
+		BUG();
 	}
+	if (fault & VM_FAULT_MAJOR)
+		tsk->maj_flt++;
+	else
+		tsk->min_flt++;
 
 	up_read(&mm->mmap_sem);
 	return;
@@ -170,24 +168,38 @@ no_context:
 	 * terminate things with extreme prejudice.
 	 *
 	 */
-	if (address < PAGE_SIZE)
-		printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference");
-	else
-		printk(KERN_ALERT "Unable to handle kernel paging request");
-	printk(" at virtual address %08lx\n", address);
-	printk(KERN_ALERT "pc = %08lx\n", regs->pc);
-	page = (unsigned long)get_TTB();
-	if (page) {
-		page = ((unsigned long *) page)[address >> PGDIR_SHIFT];
-		printk(KERN_ALERT "*pde = %08lx\n", page);
-		if (page & _PAGE_PRESENT) {
-			page &= PAGE_MASK;
-			address &= 0x003ff000;
-			page = ((unsigned long *) __va(page))[address >> PAGE_SHIFT];
-			printk(KERN_ALERT "*pte = %08lx\n", page);
+
+	bust_spinlocks(1);
+
+	if (oops_may_print()) {
+		__typeof__(pte_val(__pte(0))) page;
+
+		if (address < PAGE_SIZE)
+			printk(KERN_ALERT "Unable to handle kernel NULL "
+			       "pointer dereference");
+		else
+			printk(KERN_ALERT "Unable to handle kernel paging "
+			       "request");
+		printk(" at virtual address %08lx\n", address);
+		printk(KERN_ALERT "pc = %08lx\n", regs->pc);
+		page = (unsigned long)get_TTB();
+		if (page) {
+			page = ((__typeof__(page) *) __va(page))[address >>
								 PGDIR_SHIFT];
+			printk(KERN_ALERT "*pde = %08lx\n", page);
+			if (page & _PAGE_PRESENT) {
+				page &= PAGE_MASK;
+				address &= 0x003ff000;
+				page = ((__typeof__(page) *)
						__va(page))[address >>
							    PAGE_SHIFT];
+				printk(KERN_ALERT "*pte = %08lx\n", page);
+			}
 		}
 	}
 
+	die("Oops", regs, writeaccess);
+	bust_spinlocks(0);
 	do_exit(SIGKILL);
 
 /*
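The fault.c hunks above move from treating the handle_mm_fault() return value as an enumeration (VM_FAULT_MINOR, VM_FAULT_MAJOR, ...) to treating it as a bit mask tested against VM_FAULT_ERROR. The following is a minimal sketch of that pattern, assuming the four-argument handle_mm_fault() used in this tree; the helper name and its error-code mapping are hypothetical, since the real caller jumps to its out_of_memory and do_sigbus labels instead.

    /* Sketch only: consume the bitmask-style handle_mm_fault() result. */
    #include <linux/mm.h>
    #include <linux/sched.h>

    static int handle_one_fault(struct task_struct *tsk, struct mm_struct *mm,
                                struct vm_area_struct *vma,
                                unsigned long address, int writeaccess)
    {
        int fault = handle_mm_fault(mm, vma, address, writeaccess);

        if (unlikely(fault & VM_FAULT_ERROR)) {
            if (fault & VM_FAULT_OOM)
                return -ENOMEM;     /* caller handles out-of-memory */
            if (fault & VM_FAULT_SIGBUS)
                return -EFAULT;     /* caller raises SIGBUS */
            BUG();                  /* unexpected error bit */
        }

        /* On success, MAJOR/MINOR are informational bits, not return codes. */
        if (fault & VM_FAULT_MAJOR)
            tsk->maj_flt++;
        else
            tsk->min_flt++;

        return 0;
    }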
diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
index e0e644ff3204..82b68c789a5f 100644
--- a/arch/sh/mm/init.c
+++ b/arch/sh/mm/init.c
@@ -18,6 +18,7 @@
 #include <asm/mmu_context.h>
 #include <asm/tlb.h>
 #include <asm/cacheflush.h>
+#include <asm/sections.h>
 #include <asm/cache.h>
 
 DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
@@ -36,14 +37,11 @@ void show_mem(void)
 	show_free_areas();
 
 	for_each_online_pgdat(pgdat) {
-		struct page *page, *end;
-		unsigned long flags;
+		unsigned long flags, i;
 
 		pgdat_resize_lock(pgdat, &flags);
-		page = pgdat->node_mem_map;
-		end = page + pgdat->node_spanned_pages;
-
-		do {
+		for (i = 0; i < pgdat->node_spanned_pages; i++) {
+			struct page *page = pgdat_page_nr(pgdat, i);
 			total++;
 			if (PageReserved(page))
 				reserved++;
@@ -55,9 +53,7 @@ void show_mem(void)
 			free++;
 		else
 			shared += page_count(page) - 1;
-			page++;
-		} while (page < end);
-
+		}
 		pgdat_resize_unlock(pgdat, &flags);
 	}
 
@@ -137,16 +133,12 @@ void __set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
 }
 #endif /* CONFIG_MMU */
 
-/* References to section boundaries */
-
-extern char _text, _etext, _edata, __bss_start, _end;
-extern char __init_begin, __init_end;
-
 /*
  * paging_init() sets up the page tables
 */
 void __init paging_init(void)
 {
+	unsigned long max_zone_pfns[MAX_NR_ZONES];
 	int nid;
 
 	/* We don't need to map the kernel through the TLB, as
@@ -158,43 +150,39 @@ void __init paging_init(void)
 	 * check for a null value.
 	 */
 	set_TTB(swapper_pg_dir);
 
+	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
+
 	for_each_online_node(nid) {
 		pg_data_t *pgdat = NODE_DATA(nid);
-		unsigned long max_zone_pfns[MAX_NR_ZONES];
 		unsigned long low, start_pfn;
 
-		memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
-
 		start_pfn = pgdat->bdata->node_boot_start >> PAGE_SHIFT;
 		low = pgdat->bdata->node_low_pfn;
 
-		max_zone_pfns[ZONE_NORMAL] = low;
-		add_active_range(nid, start_pfn, low);
+		if (max_zone_pfns[ZONE_NORMAL] < low)
+			max_zone_pfns[ZONE_NORMAL] = low;
 
 		printk("Node %u: start_pfn = 0x%lx, low = 0x%lx\n",
 		       nid, start_pfn, low);
-
-		free_area_init_nodes(max_zone_pfns);
-
-		printk("Node %u: mem_map starts at %p\n",
-			pgdat->node_id, pgdat->node_mem_map);
 	}
+
+	free_area_init_nodes(max_zone_pfns);
 }
 
 static struct kcore_list kcore_mem, kcore_vmalloc;
 
 void __init mem_init(void)
 {
-	int codesize, reservedpages, datasize, initsize;
+	int codesize, datasize, initsize;
 	int nid;
 
-	reservedpages = 0;
+	num_physpages = 0;
+	high_memory = NULL;
 
 	for_each_online_node(nid) {
 		pg_data_t *pgdat = NODE_DATA(nid);
 		unsigned long node_pages = 0;
 		void *node_high_memory;
-		int i;
 
 		num_physpages += pgdat->node_present_pages;
@@ -203,13 +191,9 @@ void __init mem_init(void)
 
 		totalram_pages += node_pages;
 
-		for (i = 0; i < node_pages; i++)
-			if (PageReserved(pgdat->node_mem_map + i))
-				reservedpages++;
-
-		node_high_memory = (void *)((pgdat->node_start_pfn +
-					     pgdat->node_spanned_pages) <<
-					     PAGE_SHIFT);
+		node_high_memory = (void *)__va((pgdat->node_start_pfn +
+						 pgdat->node_spanned_pages) <<
+						 PAGE_SHIFT);
 		if (node_high_memory > high_memory)
 			high_memory = node_high_memory;
 	}
@@ -239,11 +223,10 @@ void __init mem_init(void)
 		   VMALLOC_END - VMALLOC_START);
 
 	printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
-	       "%dk reserved, %dk data, %dk init)\n",
+	       "%dk data, %dk init)\n",
 		(unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
-		totalram_pages << (PAGE_SHIFT-10),
+		num_physpages << (PAGE_SHIFT-10),
 		codesize >> 10,
-		reservedpages << (PAGE_SHIFT-10),
 		datasize >> 10,
 		initsize >> 10);
 
@@ -264,7 +247,9 @@ void free_initmem(void)
 		free_page(addr);
 		totalram_pages++;
 	}
-	printk ("Freeing unused kernel memory: %dk freed\n", (&__init_end - &__init_begin) >> 10);
+	printk("Freeing unused kernel memory: %ldk freed\n",
+	       ((unsigned long)&__init_end -
	       (unsigned long)&__init_begin) >> 10);
 }
 
 #ifdef CONFIG_BLK_DEV_INITRD
@@ -277,6 +262,50 @@ void free_initrd_mem(unsigned long start, unsigned long end)
 		free_page(p);
 		totalram_pages++;
 	}
-	printk ("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
+	printk("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
+}
+#endif
+
+#ifdef CONFIG_MEMORY_HOTPLUG
+void online_page(struct page *page)
+{
+	ClearPageReserved(page);
+	init_page_count(page);
+	__free_page(page);
+	totalram_pages++;
+	num_physpages++;
 }
+
+int arch_add_memory(int nid, u64 start, u64 size)
+{
+	pg_data_t *pgdat;
+	unsigned long start_pfn = start >> PAGE_SHIFT;
+	unsigned long nr_pages = size >> PAGE_SHIFT;
+	int ret;
+
+	pgdat = NODE_DATA(nid);
+
+	/* We only have ZONE_NORMAL, so this is easy.. */
+	ret = __add_pages(pgdat->node_zones + ZONE_NORMAL, start_pfn, nr_pages);
+	if (unlikely(ret))
+		printk("%s: Failed, __add_pages() == %d\n", __FUNCTION__, ret);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(arch_add_memory);
+
+int remove_memory(u64 start, u64 size)
+{
+	return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(remove_memory);
+
+#ifdef CONFIG_NUMA
+int memory_add_physaddr_to_nid(u64 addr)
+{
+	/* Node 0 for now.. */
+	return 0;
+}
+EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
+#endif
 #endif
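With online_page() and arch_add_memory() in place, plus CONFIG_ARCH_MEMORY_PROBE in the Kconfig hunk, newly discovered blocks can be handed to the generic memory hotplug core. The sketch below is a hypothetical platform hook, not part of this patch: the physical range is invented, and add_memory() is shown with the prototype contemporary with this diff (later kernels changed it).

    /* Hypothetical sketch: feed a discovered block to the hotplug core,
     * which eventually calls the arch_add_memory() added above.
     */
    #include <linux/init.h>
    #include <linux/memory_hotplug.h>

    static int __init example_add_extra_ram(void)
    {
        u64 base = 0x10000000;      /* assumed start of the new block */
        u64 size = 64 << 20;        /* assumed 64 MiB */

        /* memory_add_physaddr_to_nid() maps everything to node 0 for now,
         * matching the stub added in init.c above. */
        return add_memory(memory_add_physaddr_to_nid(base), base, size);
    }
    late_initcall(example_add_extra_ram);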
diff --git a/arch/sh/mm/numa.c b/arch/sh/mm/numa.c
new file mode 100644
index 000000000000..8aff065dd307
--- /dev/null
+++ b/arch/sh/mm/numa.c
@@ -0,0 +1,92 @@
+/*
+ * arch/sh/mm/numa.c - Multiple node support for SH machines
+ *
+ * Copyright (C) 2007 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/module.h>
+#include <linux/bootmem.h>
+#include <linux/mm.h>
+#include <linux/numa.h>
+#include <linux/pfn.h>
+#include <asm/sections.h>
+
+static bootmem_data_t plat_node_bdata[MAX_NUMNODES];
+struct pglist_data *node_data[MAX_NUMNODES] __read_mostly;
+EXPORT_SYMBOL_GPL(node_data);
+
+/*
+ * On SH machines the conventional approach is to stash system RAM
+ * in node 0, and other memory blocks in to node 1 and up, ordered by
+ * latency. Each node's pgdat is node-local at the beginning of the node,
+ * immediately followed by the node mem map.
+ */
+void __init setup_memory(void)
+{
+	unsigned long free_pfn = PFN_UP(__pa(_end));
+
+	/*
+	 * Node 0 sets up its pgdat at the first available pfn,
+	 * and bumps it up before setting up the bootmem allocator.
+	 */
+	NODE_DATA(0) = pfn_to_kaddr(free_pfn);
+	memset(NODE_DATA(0), 0, sizeof(struct pglist_data));
+	free_pfn += PFN_UP(sizeof(struct pglist_data));
+	NODE_DATA(0)->bdata = &plat_node_bdata[0];
+
+	/* Set up node 0 */
+	setup_bootmem_allocator(free_pfn);
+
+	/* Give the platforms a chance to hook up their nodes */
+	plat_mem_setup();
+}
+
+void __init setup_bootmem_node(int nid, unsigned long start, unsigned long end)
+{
+	unsigned long bootmap_pages, bootmap_start, bootmap_size;
+	unsigned long start_pfn, free_pfn, end_pfn;
+
+	/* Don't allow bogus node assignment */
+	BUG_ON(nid > MAX_NUMNODES || nid == 0);
+
+	/*
+	 * The free pfn starts at the beginning of the range, and is
+	 * advanced as necessary for pgdat and node map allocations.
+	 */
+	free_pfn = start_pfn = start >> PAGE_SHIFT;
+	end_pfn = end >> PAGE_SHIFT;
+
+	add_active_range(nid, start_pfn, end_pfn);
+
+	/* Node-local pgdat */
+	NODE_DATA(nid) = pfn_to_kaddr(free_pfn);
+	free_pfn += PFN_UP(sizeof(struct pglist_data));
+	memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));
+
+	NODE_DATA(nid)->bdata = &plat_node_bdata[nid];
+	NODE_DATA(nid)->node_start_pfn = start_pfn;
+	NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;
+
+	/* Node-local bootmap */
+	bootmap_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
+	bootmap_start = (unsigned long)pfn_to_kaddr(free_pfn);
+	bootmap_size = init_bootmem_node(NODE_DATA(nid), free_pfn, start_pfn,
+					 end_pfn);
+
+	free_bootmem_with_active_regions(nid, end_pfn);
+
+	/* Reserve the pgdat and bootmap space with the bootmem allocator */
+	reserve_bootmem_node(NODE_DATA(nid), start_pfn << PAGE_SHIFT,
+			     sizeof(struct pglist_data));
+	reserve_bootmem_node(NODE_DATA(nid), free_pfn << PAGE_SHIFT,
+			     bootmap_pages << PAGE_SHIFT);
+
+	/* It's up */
+	node_set_online(nid);
+
+	/* Kick sparsemem */
+	sparse_memory_present_with_active_regions(nid);
+}
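setup_memory() above leaves secondary nodes to plat_mem_setup(), so a board that knows about an extra, slower block registers it there with setup_bootmem_node(). A minimal hypothetical board hook is sketched below; the address range is made up, and the prototype is declared locally rather than guessing which asm/ header exports it.

    /* Hypothetical board code: wire a slower 16 MiB block up as node 1.
     * setup_bootmem_node() takes physical start and end addresses.
     */
    #include <linux/init.h>

    extern void __init setup_bootmem_node(int nid, unsigned long start,
                                          unsigned long end);

    void __init plat_mem_setup(void)
    {
        /* Placeholder range; a real board would use its actual block. */
        setup_bootmem_node(1, 0x18000000, 0x19000000);
    }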
diff --git a/arch/sh/mm/pg-dma.c b/arch/sh/mm/pg-dma.c
deleted file mode 100644
index bb23679369d6..000000000000
--- a/arch/sh/mm/pg-dma.c
+++ /dev/null
@@ -1,95 +0,0 @@
-/*
- * arch/sh/mm/pg-dma.c
- *
- * Fast clear_page()/copy_page() implementation using the SH DMAC
- *
- * Copyright (C) 2003 Paul Mundt
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <asm/semaphore.h>
-#include <asm/mmu_context.h>
-#include <asm/addrspace.h>
-#include <asm/atomic.h>
-#include <asm/page.h>
-#include <asm/dma.h>
-#include <asm/io.h>
-
-/* Channel to use for page ops, must be dual-address mode capable. */
-static int dma_channel = CONFIG_DMA_PAGE_OPS_CHANNEL;
-
-static void copy_page_dma(void *to, void *from)
-{
-	/*
-	 * This doesn't seem to get triggered until further along in the
-	 * boot process, at which point the DMAC is already initialized.
-	 * Fix this in the same fashion as clear_page_dma() in the event
-	 * that this crashes due to the DMAC not being initialized.
-	 */
-
-	flush_icache_range((unsigned long)from, PAGE_SIZE);
-	dma_write_page(dma_channel, (unsigned long)from, (unsigned long)to);
-	dma_wait_for_completion(dma_channel);
-}
-
-static void clear_page_dma(void *to)
-{
-	/*
-	 * We get invoked quite early on, if the DMAC hasn't been initialized
-	 * yet, fall back on the slow manual implementation.
-	 */
-	if (dma_info[dma_channel].chan != dma_channel) {
-		clear_page_slow(to);
-		return;
-	}
-
-	dma_write_page(dma_channel, (unsigned long)empty_zero_page,
-		       (unsigned long)to);
-
-	/*
-	 * FIXME: Something is a bit racy here, if we poll the counter right
-	 * away, we seem to lock. flushing the page from the dcache doesn't
-	 * seem to make a difference one way or the other, though either a full
-	 * icache or dcache flush does.
-	 *
-	 * The location of this is important as well, and must happen prior to
-	 * the completion loop but after the transfer was initiated.
-	 *
-	 * Oddly enough, this doesn't appear to be an issue for copy_page()..
-	 */
-	flush_icache_range((unsigned long)to, PAGE_SIZE);
-
-	dma_wait_for_completion(dma_channel);
-}
-
-static int __init pg_dma_init(void)
-{
-	int ret;
-
-	ret = request_dma(dma_channel, "page ops");
-	if (ret != 0)
-		return ret;
-
-	copy_page = copy_page_dma;
-	clear_page = clear_page_dma;
-
-	return ret;
-}
-
-static void __exit pg_dma_exit(void)
-{
-	free_dma(dma_channel);
-}
-
-module_init(pg_dma_init);
-module_exit(pg_dma_exit);
-
-MODULE_AUTHOR("Paul Mundt <lethal@linux-sh.org>");
-MODULE_DESCRIPTION("Optimized page copy/clear routines using a dual-address mode capable DMAC channel");
-MODULE_LICENSE("GPL");
-
diff --git a/arch/sh/mm/pmb.c b/arch/sh/mm/pmb.c
index b6a5a338145b..a08a4a958add 100644
--- a/arch/sh/mm/pmb.c
+++ b/arch/sh/mm/pmb.c
@@ -310,7 +310,7 @@ static int __init pmb_init(void)
 	BUG_ON(unlikely(nr_entries >= NR_PMB_ENTRIES));
 
 	pmb_cache = kmem_cache_create("pmb", sizeof(struct pmb_entry), 0,
-				      SLAB_PANIC, pmb_cache_ctor, NULL);
+				      SLAB_PANIC, pmb_cache_ctor);
 
 	jump_to_P2();
 
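The pmb.c hunk only adapts to kmem_cache_create() dropping its destructor argument. For reference, a hedged sketch of the constructor-only usage pattern is shown below with the modern ctor prototype, void (*)(void *); the exact signature at the time of this diff differed, so treat it as an assumption rather than a drop-in for this tree.

    /* Sketch: slab cache with a constructor and no destructor. */
    #include <linux/init.h>
    #include <linux/slab.h>
    #include <linux/string.h>

    struct example_entry {
        unsigned long vpn, ppn, flags;
    };

    static struct kmem_cache *example_cache;

    static void example_ctor(void *obj)
    {
        memset(obj, 0, sizeof(struct example_entry));
    }

    static int __init example_cache_init(void)
    {
        struct example_entry *e;

        /* SLAB_PANIC means creation failure panics, so no NULL check here. */
        example_cache = kmem_cache_create("example", sizeof(struct example_entry),
                                          0, SLAB_PANIC, example_ctor);

        e = kmem_cache_alloc(example_cache, GFP_KERNEL);
        if (e)
            kmem_cache_free(example_cache, e);
        return 0;
    }
    late_initcall(example_cache_init);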