path: root/arch/sh/mm
Diffstat (limited to 'arch/sh/mm')
-rw-r--r--  arch/sh/mm/init.c     | 21
-rw-r--r--  arch/sh/mm/tlb-sh3.c  | 19
2 files changed, 14 insertions, 26 deletions
diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
index 4e9c854845a4..e342565f75fb 100644
--- a/arch/sh/mm/init.c
+++ b/arch/sh/mm/init.c
@@ -51,11 +51,6 @@ unsigned long mmu_context_cache = NO_CONTEXT;
#define MAX_LOW_PFN (NODE_DATA(0)->bdata->node_low_pfn)
#endif
-#ifdef CONFIG_DISCONTIGMEM
-pg_data_t discontig_page_data[MAX_NUMNODES];
-bootmem_data_t discontig_node_bdata[MAX_NUMNODES];
-#endif
-
void (*copy_page)(void *from, void *to);
void (*clear_page)(void *to);
@@ -216,15 +211,6 @@ void __init paging_init(void)
#endif
NODE_DATA(0)->node_mem_map = NULL;
free_area_init_node(0, NODE_DATA(0), zones_size, __MEMORY_START >> PAGE_SHIFT, 0);
-
-#ifdef CONFIG_DISCONTIGMEM
- /*
- * And for discontig, do some more fixups on the zone sizes..
- */
- zones_size[ZONE_DMA] = __MEMORY_SIZE_2ND >> PAGE_SHIFT;
- zones_size[ZONE_NORMAL] = 0;
- free_area_init_node(1, NODE_DATA(1), zones_size, __MEMORY_START_2ND >> PAGE_SHIFT, 0);
-#endif
}
void __init mem_init(void)
@@ -248,7 +234,7 @@ void __init mem_init(void)
memset(empty_zero_page, 0, PAGE_SIZE);
__flush_wback_region(empty_zero_page, PAGE_SIZE);
- /*
+ /*
* Setup wrappers for copy/clear_page(), these will get overridden
* later in the boot process if a better method is available.
*/
@@ -257,9 +243,6 @@ void __init mem_init(void)
/* this will put all low memory onto the freelists */
totalram_pages += free_all_bootmem_node(NODE_DATA(0));
-#ifdef CONFIG_DISCONTIGMEM
- totalram_pages += free_all_bootmem_node(NODE_DATA(1));
-#endif
reservedpages = 0;
for (tmp = 0; tmp < num_physpages; tmp++)
/*
@@ -286,7 +269,7 @@ void __init mem_init(void)
void free_initmem(void)
{
unsigned long addr;
-
+
addr = (unsigned long)(&__init_begin);
for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
ClearPageReserved(virt_to_page(addr));
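
For illustration, here is a minimal sketch of the flat, single-node path that init.c is left with once the CONFIG_DISCONTIGMEM blocks above are removed. The helper name flat_paging_init() and the ZONE_DMA sizing line are assumptions made for this sketch; only NODE_DATA(0), MAX_LOW_PFN and the free_area_init_node() call are taken from the surrounding code.

/*
 * Illustrative sketch only, not part of the patch: with
 * CONFIG_DISCONTIGMEM gone, node 0 is the only node, so zone setup
 * runs exactly once.  START_PFN is assumed to be defined next to
 * MAX_LOW_PFN near the top of init.c.
 */
static void __init flat_paging_init(void)
{
	unsigned long zones_size[MAX_NR_ZONES] = { 0, };

	/* All low memory goes into the single DMA zone here. */
	zones_size[ZONE_DMA] = MAX_LOW_PFN - START_PFN;

	NODE_DATA(0)->node_mem_map = NULL;
	free_area_init_node(0, NODE_DATA(0), zones_size,
			    __MEMORY_START >> PAGE_SHIFT, 0);
}
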
diff --git a/arch/sh/mm/tlb-sh3.c b/arch/sh/mm/tlb-sh3.c
index 7a0d5c10bf20..46b09e26e082 100644
--- a/arch/sh/mm/tlb-sh3.c
+++ b/arch/sh/mm/tlb-sh3.c
@@ -40,12 +40,17 @@ void update_mmu_cache(struct vm_area_struct * vma,
return;
#if defined(CONFIG_SH7705_CACHE_32KB)
- struct page *page;
- page = pte_page(pte);
- if (VALID_PAGE(page) && !test_bit(PG_mapped, &page->flags)) {
- unsigned long phys = pte_val(pte) & PTE_PHYS_MASK;
- __flush_wback_region((void *)P1SEGADDR(phys), PAGE_SIZE);
- __set_bit(PG_mapped, &page->flags);
+ {
+ struct page *page = pte_page(pte);
+ unsigned long pfn = pte_pfn(pte);
+
+ if (pfn_valid(pfn) && !test_bit(PG_mapped, &page->flags)) {
+ unsigned long phys = pte_val(pte) & PTE_PHYS_MASK;
+
+ __flush_wback_region((void *)P1SEGADDR(phys),
+ PAGE_SIZE);
+ __set_bit(PG_mapped, &page->flags);
+ }
}
#endif
@@ -80,7 +85,7 @@ void __flush_tlb_page(unsigned long asid, unsigned long page)
*/
addr = MMU_TLB_ADDRESS_ARRAY | (page & 0x1F000);
data = (page & 0xfffe0000) | asid; /* VALID bit is off */
-
+
if ((cpu_data->flags & CPU_HAS_MMU_PAGE_ASSOC)) {
addr |= MMU_PAGE_ASSOC_BIT;
ways = 1; /* we already know the way .. */
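
For illustration, the flush-on-first-map pattern that the SH7705 hunk above adopts, pulled out as a standalone sketch. The function name flush_on_first_map() is hypothetical, and the remark about VALID_PAGE is an assumption about why the check changed; the macros and helpers (pte_pfn, pfn_valid, PG_mapped, PTE_PHYS_MASK, P1SEGADDR, __flush_wback_region) are the ones used in the hunk itself.

/*
 * Illustrative sketch only, not part of the patch.  pfn_valid()
 * replaces the old VALID_PAGE() test, which assumed a single flat
 * mem_map array (an assumption of this note, not a statement from
 * the patch).
 */
static inline void flush_on_first_map(pte_t pte)
{
	unsigned long pfn = pte_pfn(pte);
	struct page *page = pte_page(pte);

	if (pfn_valid(pfn) && !test_bit(PG_mapped, &page->flags)) {
		unsigned long phys = pte_val(pte) & PTE_PHYS_MASK;

		/* Write back the P1 (cacheable) alias of the page ... */
		__flush_wback_region((void *)P1SEGADDR(phys), PAGE_SIZE);

		/* ... and remember that it has been done once. */
		__set_bit(PG_mapped, &page->flags);
	}
}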