author | Chris Metcalf <cmetcalf@tilera.com> | 2012-03-29 21:42:27 +0200 |
---|---|---|
committer | Chris Metcalf <cmetcalf@tilera.com> | 2012-04-02 18:13:12 +0200 |
commit | 7a7039ee71811222310b431aee246eb78dd0d401 (patch) | |
tree | 14d3c560a0053f88a3be2d7a7c31749b148c5d83 /arch | |
parent | arch/tile: don't enable irqs unconditionally in page fault handler (diff) | |
arch/tile: fix bug in loading kernels larger than 16 MB
Previously we only handled kernels up to a single huge page in size.
Now we create additional PTEs appropriately.
Signed-off-by: Chris Metcalf <cmetcalf@tilera.com>
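
For readers following along, here is a minimal userspace sketch (not part of the patch) of what the reworked small-page path does. The page and table sizes and the 40 MB kernel are illustrative assumptions, and the stand-in types and helpers only mirror the names used in the diff (alloc_pte(), assign_pte(), pte_index(), pfn_pte()). The key idea: whenever the L2 index wraps back to zero, the just-filled L2 table is handed to the current PMD slot and a fresh one is allocated, so text larger than one huge page simply consumes more PMD slots.

```c
/*
 * Illustrative userspace model of the new ktext_small loop -- NOT kernel
 * code.  Sizes are assumptions: 64 KB pages, 256 PTEs per L2 table,
 * hence 16 MB of text per PMD slot.
 */
#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE    (64UL * 1024)
#define PTRS_PER_PTE 256UL
#define HPAGE_SIZE   (PAGE_SIZE * PTRS_PER_PTE)   /* 16 MB per PMD entry */

struct pte { unsigned long pfn; };
struct pmd { struct pte *l2; };

static struct pte *alloc_pte(void)            /* stand-in for the kernel helper */
{
	return calloc(PTRS_PER_PTE, sizeof(struct pte));
}

static unsigned long pte_index(unsigned long address)
{
	return (address / PAGE_SIZE) % PTRS_PER_PTE;
}

int main(void)
{
	unsigned long text_end = 40UL * 1024 * 1024;  /* hypothetical 40 MB kernel */
	struct pmd pmds[16] = { { NULL } };
	struct pmd *pmd = pmds;
	struct pte *pte = NULL;
	unsigned long address, pfn = 0;               /* text starts at PA 0 */

	for (address = 0; address < text_end;
	     pfn++, address += PAGE_SIZE) {
		unsigned long pte_ofs = pte_index(address);
		if (pte_ofs == 0) {                   /* entered a new 16 MB chunk */
			if (pte)
				(pmd++)->l2 = pte;    /* like assign_pte(pmd++, pte) */
			pte = alloc_pte();
		}
		pte[pte_ofs].pfn = pfn;               /* like pfn_pte(pfn, prot) */
	}
	if (pte)
		pmd->l2 = pte;                        /* final, possibly partial, table */

	printf("used %td PMD entries for %lu MB of text\n",
	       pmd - pmds + 1, text_end >> 20);
	return 0;
}
```

Run as is, this prints "used 3 PMD entries for 40 MB of text". The pre-patch loop allocated a single L2 table and kept indexing into it, which only works when the whole text fits inside one huge page.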
Diffstat (limited to 'arch')
-rw-r--r-- | arch/tile/mm/init.c | 21 |
1 file changed, 15 insertions, 6 deletions
diff --git a/arch/tile/mm/init.c b/arch/tile/mm/init.c
index 830c4908ea76..8400d3fb9e0a 100644
--- a/arch/tile/mm/init.c
+++ b/arch/tile/mm/init.c
@@ -557,6 +557,7 @@ static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
         address = MEM_SV_INTRPT;
         pmd = get_pmd(pgtables, address);
+        pfn = 0; /* code starts at PA 0 */
         if (ktext_small) {
                 /* Allocate an L2 PTE for the kernel text */
                 int cpu = 0;
@@ -579,10 +580,15 @@ static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
                 }
                 BUG_ON(address != (unsigned long)_stext);
-                pfn = 0; /* code starts at PA 0 */
-                pte = alloc_pte();
-                for (pte_ofs = 0; address < (unsigned long)_einittext;
-                     pfn++, pte_ofs++, address += PAGE_SIZE) {
+                pte = NULL;
+                for (; address < (unsigned long)_einittext;
+                     pfn++, address += PAGE_SIZE) {
+                        pte_ofs = pte_index(address);
+                        if (pte_ofs == 0) {
+                                if (pte)
+                                        assign_pte(pmd++, pte);
+                                pte = alloc_pte();
+                        }
                         if (!ktext_local) {
                                 prot = set_remote_cache_cpu(prot, cpu);
                                 cpu = cpumask_next(cpu, &ktext_mask);
@@ -591,7 +597,8 @@ static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
                         }
                         pte[pte_ofs] = pfn_pte(pfn, prot);
                 }
-                assign_pte(pmd, pte);
+                if (pte)
+                        assign_pte(pmd, pte);
         } else {
                 pte_t pteval = pfn_pte(0, PAGE_KERNEL_EXEC);
                 pteval = pte_mkhuge(pteval);
@@ -614,7 +621,9 @@ static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
                 else
                         pteval = hv_pte_set_mode(pteval, HV_PTE_MODE_CACHE_NO_L3);
-                *(pte_t *)pmd = pteval;
+                for (; address < (unsigned long)_einittext;
+                     pfn += PFN_DOWN(HPAGE_SIZE), address += HPAGE_SIZE)
+                        *(pte_t *)(pmd++) = pfn_pte(pfn, pteval);
         }
         /* Set swapper_pgprot here so it is flushed to memory right away. */
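
The else branch (huge-page ktext) gets the same generalization: rather than writing a single huge PTE into one PMD slot, the new loop writes one per HPAGE_SIZE chunk of text, bumping both the PMD pointer and the starting pfn each time. A tiny sketch under the same assumed sizes (16 MB huge pages, 64 KB base pages, a hypothetical 40 MB kernel) shows the arithmetic; the pfn_pte()/pmd names are only echoes of the patch.

```c
/* Illustrative only: one huge-page PMD entry per 16 MB chunk of text. */
#include <stdio.h>

#define HPAGE_SIZE  (16UL << 20)        /* assumed huge-page size */
#define PFN_DOWN(x) ((x) >> 16)         /* assumed 64 KB base pages */

int main(void)
{
	unsigned long einittext = 40UL << 20;   /* hypothetical 40 MB kernel */
	unsigned long address, pfn = 0;
	int slot = 0;

	for (address = 0; address < einittext;
	     pfn += PFN_DOWN(HPAGE_SIZE), address += HPAGE_SIZE, slot++)
		/* in the patch: *(pte_t *)(pmd++) = pfn_pte(pfn, pteval); */
		printf("PMD slot %d maps pfn %lu\n", slot, pfn);

	printf("%d huge-page entries cover %lu MB of text\n",
	       slot, einittext >> 20);
	return 0;
}
```

With a kernel under 16 MB this degenerates to a single iteration, matching the old `*(pte_t *)pmd = pteval;`, so small configurations behave as before.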