Diffstat (limited to 'arch/ppc64/kernel/lmb.c')
-rw-r--r--  arch/ppc64/kernel/lmb.c  151
1 file changed, 39 insertions(+), 112 deletions(-)
diff --git a/arch/ppc64/kernel/lmb.c b/arch/ppc64/kernel/lmb.c
index d6c6bd03d2a4..5adaca2ddc9d 100644
--- a/arch/ppc64/kernel/lmb.c
+++ b/arch/ppc64/kernel/lmb.c
@@ -28,33 +28,28 @@ void lmb_dump_all(void)
{
#ifdef DEBUG
unsigned long i;
- struct lmb *_lmb = &lmb;
udbg_printf("lmb_dump_all:\n");
udbg_printf(" memory.cnt = 0x%lx\n",
- _lmb->memory.cnt);
+ lmb.memory.cnt);
udbg_printf(" memory.size = 0x%lx\n",
- _lmb->memory.size);
- for (i=0; i < _lmb->memory.cnt ;i++) {
+ lmb.memory.size);
+ for (i=0; i < lmb.memory.cnt ;i++) {
udbg_printf(" memory.region[0x%x].base = 0x%lx\n",
- i, _lmb->memory.region[i].base);
- udbg_printf(" .physbase = 0x%lx\n",
- _lmb->memory.region[i].physbase);
+ i, lmb.memory.region[i].base);
udbg_printf(" .size = 0x%lx\n",
- _lmb->memory.region[i].size);
+ lmb.memory.region[i].size);
}
udbg_printf("\n reserved.cnt = 0x%lx\n",
- _lmb->reserved.cnt);
+ lmb.reserved.cnt);
udbg_printf(" reserved.size = 0x%lx\n",
- _lmb->reserved.size);
- for (i=0; i < _lmb->reserved.cnt ;i++) {
+ lmb.reserved.size);
+ for (i=0; i < lmb.reserved.cnt ;i++) {
udbg_printf(" reserved.region[0x%x].base = 0x%lx\n",
- i, _lmb->reserved.region[i].base);
- udbg_printf(" .physbase = 0x%lx\n",
- _lmb->reserved.region[i].physbase);
+ i, lmb.reserved.region[i].base);
udbg_printf(" .size = 0x%lx\n",
- _lmb->reserved.region[i].size);
+ lmb.reserved.region[i].size);
}
#endif /* DEBUG */
}
@@ -98,7 +93,6 @@ lmb_coalesce_regions(struct lmb_region *rgn, unsigned long r1, unsigned long r2)
rgn->region[r1].size += rgn->region[r2].size;
for (i=r2; i < rgn->cnt-1; i++) {
rgn->region[i].base = rgn->region[i+1].base;
- rgn->region[i].physbase = rgn->region[i+1].physbase;
rgn->region[i].size = rgn->region[i+1].size;
}
rgn->cnt--;
@@ -108,49 +102,29 @@ lmb_coalesce_regions(struct lmb_region *rgn, unsigned long r1, unsigned long r2)
void __init
lmb_init(void)
{
- struct lmb *_lmb = &lmb;
-
/* Create a dummy zero size LMB which will get coalesced away later.
* This simplifies the lmb_add() code below...
*/
- _lmb->memory.region[0].base = 0;
- _lmb->memory.region[0].size = 0;
- _lmb->memory.cnt = 1;
+ lmb.memory.region[0].base = 0;
+ lmb.memory.region[0].size = 0;
+ lmb.memory.cnt = 1;
/* Ditto. */
- _lmb->reserved.region[0].base = 0;
- _lmb->reserved.region[0].size = 0;
- _lmb->reserved.cnt = 1;
+ lmb.reserved.region[0].base = 0;
+ lmb.reserved.region[0].size = 0;
+ lmb.reserved.cnt = 1;
}
/* This routine called with relocation disabled. */
void __init
lmb_analyze(void)
{
- unsigned long i;
- unsigned long mem_size = 0;
- unsigned long size_mask = 0;
- struct lmb *_lmb = &lmb;
-#ifdef CONFIG_MSCHUNKS
- unsigned long physbase = 0;
-#endif
-
- for (i=0; i < _lmb->memory.cnt; i++) {
- unsigned long lmb_size;
-
- lmb_size = _lmb->memory.region[i].size;
-
-#ifdef CONFIG_MSCHUNKS
- _lmb->memory.region[i].physbase = physbase;
- physbase += lmb_size;
-#else
- _lmb->memory.region[i].physbase = _lmb->memory.region[i].base;
-#endif
- mem_size += lmb_size;
- size_mask |= lmb_size;
- }
+ int i;
+
+ lmb.memory.size = 0;
- _lmb->memory.size = mem_size;
+ for (i = 0; i < lmb.memory.cnt; i++)
+ lmb.memory.size += lmb.memory.region[i].size;
}
/* This routine called with relocation disabled. */
@@ -168,7 +142,6 @@ lmb_add_region(struct lmb_region *rgn, unsigned long base, unsigned long size)
adjacent = lmb_addrs_adjacent(base,size,rgnbase,rgnsize);
if ( adjacent > 0 ) {
rgn->region[i].base -= size;
- rgn->region[i].physbase -= size;
rgn->region[i].size += size;
coalesced++;
break;
@@ -195,11 +168,9 @@ lmb_add_region(struct lmb_region *rgn, unsigned long base, unsigned long size)
for (i=rgn->cnt-1; i >= 0; i--) {
if (base < rgn->region[i].base) {
rgn->region[i+1].base = rgn->region[i].base;
- rgn->region[i+1].physbase = rgn->region[i].physbase;
rgn->region[i+1].size = rgn->region[i].size;
} else {
rgn->region[i+1].base = base;
- rgn->region[i+1].physbase = lmb_abs_to_phys(base);
rgn->region[i+1].size = size;
break;
}
@@ -213,12 +184,11 @@ lmb_add_region(struct lmb_region *rgn, unsigned long base, unsigned long size)
long __init
lmb_add(unsigned long base, unsigned long size)
{
- struct lmb *_lmb = &lmb;
- struct lmb_region *_rgn = &(_lmb->memory);
+ struct lmb_region *_rgn = &(lmb.memory);
/* On pSeries LPAR systems, the first LMB is our RMO region. */
if ( base == 0 )
- _lmb->rmo_size = size;
+ lmb.rmo_size = size;
return lmb_add_region(_rgn, base, size);
@@ -227,8 +197,7 @@ lmb_add(unsigned long base, unsigned long size)
long __init
lmb_reserve(unsigned long base, unsigned long size)
{
- struct lmb *_lmb = &lmb;
- struct lmb_region *_rgn = &(_lmb->reserved);
+ struct lmb_region *_rgn = &(lmb.reserved);
return lmb_add_region(_rgn, base, size);
}
@@ -260,13 +229,10 @@ lmb_alloc_base(unsigned long size, unsigned long align, unsigned long max_addr)
{
long i, j;
unsigned long base = 0;
- struct lmb *_lmb = &lmb;
- struct lmb_region *_mem = &(_lmb->memory);
- struct lmb_region *_rsv = &(_lmb->reserved);
- for (i=_mem->cnt-1; i >= 0; i--) {
- unsigned long lmbbase = _mem->region[i].base;
- unsigned long lmbsize = _mem->region[i].size;
+ for (i=lmb.memory.cnt-1; i >= 0; i--) {
+ unsigned long lmbbase = lmb.memory.region[i].base;
+ unsigned long lmbsize = lmb.memory.region[i].size;
if ( max_addr == LMB_ALLOC_ANYWHERE )
base = _ALIGN_DOWN(lmbbase+lmbsize-size, align);
@@ -276,8 +242,8 @@ lmb_alloc_base(unsigned long size, unsigned long align, unsigned long max_addr)
continue;
while ( (lmbbase <= base) &&
- ((j = lmb_overlaps_region(_rsv,base,size)) >= 0) ) {
- base = _ALIGN_DOWN(_rsv->region[j].base-size, align);
+ ((j = lmb_overlaps_region(&lmb.reserved,base,size)) >= 0) ) {
+ base = _ALIGN_DOWN(lmb.reserved.region[j].base-size, align);
}
if ( (base != 0) && (lmbbase <= base) )
@@ -287,62 +253,24 @@ lmb_alloc_base(unsigned long size, unsigned long align, unsigned long max_addr)
if ( i < 0 )
return 0;
- lmb_add_region(_rsv, base, size);
+ lmb_add_region(&lmb.reserved, base, size);
return base;
}
+/* You must call lmb_analyze() before this. */
unsigned long __init
lmb_phys_mem_size(void)
{
- struct lmb *_lmb = &lmb;
-#ifdef CONFIG_MSCHUNKS
- return _lmb->memory.size;
-#else
- struct lmb_region *_mem = &(_lmb->memory);
- unsigned long total = 0;
- int i;
-
- /* add all physical memory to the bootmem map */
- for (i=0; i < _mem->cnt; i++)
- total += _mem->region[i].size;
- return total;
-#endif /* CONFIG_MSCHUNKS */
+ return lmb.memory.size;
}
unsigned long __init
lmb_end_of_DRAM(void)
{
- struct lmb *_lmb = &lmb;
- struct lmb_region *_mem = &(_lmb->memory);
- int idx = _mem->cnt - 1;
-
-#ifdef CONFIG_MSCHUNKS
- return (_mem->region[idx].physbase + _mem->region[idx].size);
-#else
- return (_mem->region[idx].base + _mem->region[idx].size);
-#endif /* CONFIG_MSCHUNKS */
-
- return 0;
-}
-
-unsigned long __init
-lmb_abs_to_phys(unsigned long aa)
-{
- unsigned long i, pa = aa;
- struct lmb *_lmb = &lmb;
- struct lmb_region *_mem = &(_lmb->memory);
-
- for (i=0; i < _mem->cnt; i++) {
- unsigned long lmbbase = _mem->region[i].base;
- unsigned long lmbsize = _mem->region[i].size;
- if ( lmb_addrs_overlap(aa,1,lmbbase,lmbsize) ) {
- pa = _mem->region[i].physbase + (aa - lmbbase);
- break;
- }
- }
+ int idx = lmb.memory.cnt - 1;
- return pa;
+ return (lmb.memory.region[idx].base + lmb.memory.region[idx].size);
}
/*
@@ -353,20 +281,19 @@ void __init lmb_enforce_memory_limit(void)
{
extern unsigned long memory_limit;
unsigned long i, limit;
- struct lmb_region *mem = &(lmb.memory);
if (! memory_limit)
return;
limit = memory_limit;
- for (i = 0; i < mem->cnt; i++) {
- if (limit > mem->region[i].size) {
- limit -= mem->region[i].size;
+ for (i = 0; i < lmb.memory.cnt; i++) {
+ if (limit > lmb.memory.region[i].size) {
+ limit -= lmb.memory.region[i].size;
continue;
}
- mem->region[i].size = limit;
- mem->cnt = i + 1;
+ lmb.memory.region[i].size = limit;
+ lmb.memory.cnt = i + 1;
break;
}
}
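
For reference, this is how the two simplest routines read once the hunks above are applied. It is a sketch reconstructed from the added lines of the patch, not copied from the resulting tree: lmb_analyze() now only totals the region sizes into lmb.memory.size, and lmb_phys_mem_size() returns that cached total, which is why lmb_analyze() must be called first.

/* Totals the memory region sizes into lmb.memory.size; with the
 * physbase bookkeeping removed, this is all that is left to do.
 * (Reconstructed from the diff above.) */
void __init
lmb_analyze(void)
{
	int i;

	lmb.memory.size = 0;

	for (i = 0; i < lmb.memory.cnt; i++)
		lmb.memory.size += lmb.memory.region[i].size;
}

/* You must call lmb_analyze() before this. */
unsigned long __init
lmb_phys_mem_size(void)
{
	return lmb.memory.size;
}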