author     Adrian Bunk <bunk@kernel.org>            2008-06-05 20:41:51 +0200
committer  David S. Miller <davem@davemloft.net>    2008-07-18 06:38:01 +0200
commit     50215d6511265d46ba14038640b16c5dd7731ff4
tree       e8c70718dcb89aee9fc7ee4f42e22f3a382a85c6 /arch/sparc/mm/srmmu.c
parent     sparc/kernel/: possible cleanups
sparc/mm/: possible cleanups
This patch contains the following possible cleanups:
- make the following needlessly global code static (see the sketch after the sign-offs):
- fault.c: force_user_fault()
- init.c: calc_max_low_pfn()
- init.c: pgt_cache_water[]
- init.c: map_high_region()
- srmmu.c: hwbug_bitmask
- srmmu.c: srmmu_swapper_pg_dir
- srmmu.c: srmmu_context_table
- srmmu.c: is_hypersparc
- srmmu.c: srmmu_cache_pagetables
- srmmu.c: srmmu_nocache_size
- srmmu.c: srmmu_nocache_end
- srmmu.c: srmmu_get_nocache()
- srmmu.c: srmmu_free_nocache()
- srmmu.c: srmmu_early_allocate_ptable_skeleton()
- srmmu.c: srmmu_nocache_calcsize()
- srmmu.c: srmmu_nocache_init()
- srmmu.c: srmmu_alloc_thread_info()
- srmmu.c: early_pgtable_allocfail()
- srmmu.c: srmmu_allocate_ptable_skeleton()
- srmmu.c: srmmu_inherit_prom_mappings()
- tsunami.S: tsunami_copy_1page
- remove the following unused code:
- init.c: struct sparc_aliases
Signed-off-by: Adrian Bunk <bunk@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
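For readers who have not seen this kind of cleanup before, the short sketch below shows the pattern in isolation. It is a hypothetical, self-contained userspace example: demo_bitmask and demo_count_bits() are invented names and do not appear in srmmu.c. Giving a symbol internal linkage with the static keyword keeps it out of the kernel-wide namespace, lets the compiler warn when it goes unused, and leaves the optimizer free to inline or drop it.

/*
 * demo_static.c - hypothetical illustration of the cleanup applied by
 * this patch: a symbol referenced only inside one .c file is given
 * internal linkage with "static".
 */
#include <stdio.h>

/* Was "unsigned int demo_bitmask;": a global that nothing else used.
 * With static it is no longer exported, cannot clash with a same-named
 * symbol in another file, and the compiler can warn if it is unused.
 */
static unsigned int demo_bitmask;

/* Was a non-static definition, callable from any file.  Making it
 * static keeps it file-local and lets the compiler inline it freely.
 */
static unsigned int demo_count_bits(void)
{
	unsigned int bits = 0;
	unsigned int v = demo_bitmask;

	while (v) {
		bits += v & 1;
		v >>= 1;
	}
	return bits;
}

int main(void)
{
	demo_bitmask = 0xF0;
	printf("bits set: %u\n", demo_count_bits());	/* prints 4 */
	return 0;
}

The diff below applies exactly this change to the symbols listed in the message above; where a prototype grows past one line after gaining the static keyword (srmmu_early_allocate_ptable_skeleton(), for example), the declaration is also re-wrapped.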
Diffstat (limited to 'arch/sparc/mm/srmmu.c')
-rw-r--r--  arch/sparc/mm/srmmu.c | 38
1 file changed, 21 insertions(+), 17 deletions(-)
diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
index 23d3291a3e81..c624e04ff03e 100644
--- a/arch/sparc/mm/srmmu.c
+++ b/arch/sparc/mm/srmmu.c
@@ -50,7 +50,7 @@
 #include <asm/btfixup.h>
 
 enum mbus_module srmmu_modtype;
-unsigned int hwbug_bitmask;
+static unsigned int hwbug_bitmask;
 int vac_cache_size;
 int vac_line_size;
 
@@ -60,7 +60,7 @@
 extern unsigned long last_valid_pfn;
 
 extern unsigned long page_kernel;
-pgd_t *srmmu_swapper_pg_dir;
+static pgd_t *srmmu_swapper_pg_dir;
 
 #ifdef CONFIG_SMP
 #define FLUSH_BEGIN(mm)
@@ -83,12 +83,12 @@ BTFIXUPDEF_CALL(void, local_flush_page_for_dma, unsigned long)
 char *srmmu_name;
 
 ctxd_t *srmmu_ctx_table_phys;
-ctxd_t *srmmu_context_table;
+static ctxd_t *srmmu_context_table;
 
 int viking_mxcc_present;
 static DEFINE_SPINLOCK(srmmu_context_spinlock);
 
-int is_hypersparc;
+static int is_hypersparc;
 
 /*
  * In general all page table modifications should use the V8 atomic
@@ -112,11 +112,11 @@ static inline int srmmu_device_memory(unsigned long x)
 	return ((x & 0xF0000000) != 0);
 }
 
-int srmmu_cache_pagetables;
+static int srmmu_cache_pagetables;
 
 /* these will be initialized in srmmu_nocache_calcsize() */
-unsigned long srmmu_nocache_size;
-unsigned long srmmu_nocache_end;
+static unsigned long srmmu_nocache_size;
+static unsigned long srmmu_nocache_end;
 
 /* 1 bit <=> 256 bytes of nocache <=> 64 PTEs */
 #define SRMMU_NOCACHE_BITMAP_SHIFT (PAGE_SHIFT - 4)
@@ -324,7 +324,7 @@ static unsigned long __srmmu_get_nocache(int size, int align)
 	return (SRMMU_NOCACHE_VADDR + (offset << SRMMU_NOCACHE_BITMAP_SHIFT));
 }
 
-unsigned inline long srmmu_get_nocache(int size, int align)
+static unsigned long srmmu_get_nocache(int size, int align)
 {
 	unsigned long tmp;
 
@@ -336,7 +336,7 @@ unsigned inline long srmmu_get_nocache(int size, int align)
 	return tmp;
 }
 
-void srmmu_free_nocache(unsigned long vaddr, int size)
+static void srmmu_free_nocache(unsigned long vaddr, int size)
 {
 	int offset;
 
@@ -369,7 +369,8 @@ void srmmu_free_nocache(unsigned long vaddr, int size)
 	bit_map_clear(&srmmu_nocache_map, offset, size);
 }
 
-void srmmu_early_allocate_ptable_skeleton(unsigned long start, unsigned long end);
+static void srmmu_early_allocate_ptable_skeleton(unsigned long start,
+						 unsigned long end);
 
 extern unsigned long probe_memory(void); /* in fault.c */
 
@@ -377,7 +378,7 @@ extern unsigned long probe_memory(void); /* in fault.c */
  * Reserve nocache dynamically proportionally to the amount of
  * system RAM. -- Tomas Szepe <szepe@pinerecords.com>, June 2002
  */
-void srmmu_nocache_calcsize(void)
+static void srmmu_nocache_calcsize(void)
 {
 	unsigned long sysmemavail = probe_memory() / 1024;
 	int srmmu_nocache_npages;
@@ -398,7 +399,7 @@ void srmmu_nocache_calcsize(void)
 	srmmu_nocache_end = SRMMU_NOCACHE_VADDR + srmmu_nocache_size;
 }
 
-void __init srmmu_nocache_init(void)
+static void __init srmmu_nocache_init(void)
 {
 	unsigned int bitmap_bits;
 	pgd_t *pgd;
@@ -645,7 +646,7 @@ static void srmmu_unmapiorange(unsigned long virt_addr, unsigned int len)
  * mappings on the kernel stack without any special code as we did
  * need on the sun4c.
  */
-struct thread_info *srmmu_alloc_thread_info(void)
+static struct thread_info *srmmu_alloc_thread_info(void)
 {
 	struct thread_info *ret;
 
@@ -1045,13 +1046,14 @@ extern void hypersparc_setup_blockops(void);
  * around 8mb mapped for us.
  */
-void __init early_pgtable_allocfail(char *type)
+static void __init early_pgtable_allocfail(char *type)
 {
 	prom_printf("inherit_prom_mappings: Cannot alloc kernel %s.\n", type);
 	prom_halt();
 }
 
-void __init srmmu_early_allocate_ptable_skeleton(unsigned long start, unsigned long end)
+static void __init srmmu_early_allocate_ptable_skeleton(unsigned long start,
+							 unsigned long end)
 {
 	pgd_t *pgdp;
 	pmd_t *pmdp;
@@ -1081,7 +1083,8 @@ void __init srmmu_early_allocate_ptable_skeleton(unsigned long start, unsigned l
 	}
 }
 
-void __init srmmu_allocate_ptable_skeleton(unsigned long start, unsigned long end)
+static void __init srmmu_allocate_ptable_skeleton(unsigned long start,
+						  unsigned long end)
 {
 	pgd_t *pgdp;
 	pmd_t *pmdp;
@@ -1116,7 +1119,8 @@ void __init srmmu_allocate_ptable_skeleton(unsigned long start, unsigned long en
  * looking at the prom's page table directly which is what most
  * other OS's do. Yuck... this is much better.
  */
-void __init srmmu_inherit_prom_mappings(unsigned long start,unsigned long end)
+static void __init srmmu_inherit_prom_mappings(unsigned long start,
+					       unsigned long end)
 {
 	pgd_t *pgdp;
 	pmd_t *pmdp;