Diffstat (limited to 'drivers/misc/sgi-gru/grumain.c')
-rw-r--r-- | drivers/misc/sgi-gru/grumain.c | 84
1 file changed, 49 insertions, 35 deletions
diff --git a/drivers/misc/sgi-gru/grumain.c b/drivers/misc/sgi-gru/grumain.c
index 3d2fc216bae5..ec3f7a17d221 100644
--- a/drivers/misc/sgi-gru/grumain.c
+++ b/drivers/misc/sgi-gru/grumain.c
@@ -76,10 +76,9 @@ int gru_cpu_fault_map_id(void)
 /* Hit the asid limit. Start over */
 static int gru_wrap_asid(struct gru_state *gru)
 {
-        gru_dbg(grudev, "gru %p\n", gru);
+        gru_dbg(grudev, "gid %d\n", gru->gs_gid);
         STAT(asid_wrap);
         gru->gs_asid_gen++;
-        gru_flush_all_tlb(gru);
         return MIN_ASID;
 }
 
@@ -88,19 +87,21 @@ static int gru_reset_asid_limit(struct gru_state *gru, int asid)
 {
         int i, gid, inuse_asid, limit;
 
-        gru_dbg(grudev, "gru %p, asid 0x%x\n", gru, asid);
+        gru_dbg(grudev, "gid %d, asid 0x%x\n", gru->gs_gid, asid);
         STAT(asid_next);
         limit = MAX_ASID;
         if (asid >= limit)
                 asid = gru_wrap_asid(gru);
+        gru_flush_all_tlb(gru);
         gid = gru->gs_gid;
again:
         for (i = 0; i < GRU_NUM_CCH; i++) {
                 if (!gru->gs_gts[i])
                         continue;
                 inuse_asid = gru->gs_gts[i]->ts_gms->ms_asids[gid].mt_asid;
-                gru_dbg(grudev, "gru %p, inuse_asid 0x%x, cxtnum %d, gts %p\n",
-                        gru, inuse_asid, i, gru->gs_gts[i]);
+                gru_dbg(grudev, "gid %d, gts %p, gms %p, inuse 0x%x, cxt %d\n",
+                        gru->gs_gid, gru->gs_gts[i], gru->gs_gts[i]->ts_gms,
+                        inuse_asid, i);
                 if (inuse_asid == asid) {
                         asid += ASID_INC;
                         if (asid >= limit) {
@@ -120,8 +121,8 @@ again:
         }
         gru->gs_asid_limit = limit;
         gru->gs_asid = asid;
-        gru_dbg(grudev, "gru %p, new asid 0x%x, new_limit 0x%x\n", gru, asid,
-                limit);
+        gru_dbg(grudev, "gid %d, new asid 0x%x, new_limit 0x%x\n", gru->gs_gid,
+                asid, limit);
         return asid;
 }
 
@@ -130,14 +131,12 @@ static int gru_assign_asid(struct gru_state *gru)
 {
         int asid;
 
-        spin_lock(&gru->gs_asid_lock);
         gru->gs_asid += ASID_INC;
         asid = gru->gs_asid;
         if (asid >= gru->gs_asid_limit)
                 asid = gru_reset_asid_limit(gru, asid);
-        spin_unlock(&gru->gs_asid_lock);
 
-        gru_dbg(grudev, "gru %p, asid 0x%x\n", gru, asid);
+        gru_dbg(grudev, "gid %d, asid 0x%x\n", gru->gs_gid, asid);
         return asid;
 }
 
@@ -215,17 +214,20 @@ static int check_gru_resources(struct gru_state *gru, int cbr_au_count,
  * TLB manangment requires tracking all GRU chiplets that have loaded a GSEG
  * context.
  */
-static int gru_load_mm_tracker(struct gru_state *gru, struct gru_mm_struct *gms,
-                        int ctxnum)
+static int gru_load_mm_tracker(struct gru_state *gru,
+                        struct gru_thread_state *gts)
 {
+        struct gru_mm_struct *gms = gts->ts_gms;
         struct gru_mm_tracker *asids = &gms->ms_asids[gru->gs_gid];
-        unsigned short ctxbitmap = (1 << ctxnum);
+        unsigned short ctxbitmap = (1 << gts->ts_ctxnum);
         int asid;
 
         spin_lock(&gms->ms_asid_lock);
         asid = asids->mt_asid;
 
-        if (asid == 0 || asids->mt_asid_gen != gru->gs_asid_gen) {
+        spin_lock(&gru->gs_asid_lock);
+        if (asid == 0 || (asids->mt_ctxbitmap == 0 && asids->mt_asid_gen !=
+                          gru->gs_asid_gen)) {
                 asid = gru_assign_asid(gru);
                 asids->mt_asid = asid;
                 asids->mt_asid_gen = gru->gs_asid_gen;
@@ -233,6 +235,7 @@ static int gru_load_mm_tracker(struct gru_state *gru, struct gru_mm_struct *gms,
         } else {
                 STAT(asid_reuse);
         }
+        spin_unlock(&gru->gs_asid_lock);
 
         BUG_ON(asids->mt_ctxbitmap & ctxbitmap);
         asids->mt_ctxbitmap |= ctxbitmap;
@@ -241,24 +244,28 @@ static int gru_load_mm_tracker(struct gru_state *gru, struct gru_mm_struct *gms,
         spin_unlock(&gms->ms_asid_lock);
 
         gru_dbg(grudev,
-                "gru %x, gms %p, ctxnum 0x%d, asid 0x%x, asidmap 0x%lx\n",
-                gru->gs_gid, gms, ctxnum, asid, gms->ms_asidmap[0]);
+                "gid %d, gts %p, gms %p, ctxnum %d, asid 0x%x, asidmap 0x%lx\n",
+                gru->gs_gid, gts, gms, gts->ts_ctxnum, asid,
+                gms->ms_asidmap[0]);
         return asid;
 }
 
 static void gru_unload_mm_tracker(struct gru_state *gru,
-                                        struct gru_mm_struct *gms, int ctxnum)
+                                        struct gru_thread_state *gts)
 {
+        struct gru_mm_struct *gms = gts->ts_gms;
         struct gru_mm_tracker *asids;
         unsigned short ctxbitmap;
 
         asids = &gms->ms_asids[gru->gs_gid];
-        ctxbitmap = (1 << ctxnum);
+        ctxbitmap = (1 << gts->ts_ctxnum);
         spin_lock(&gms->ms_asid_lock);
+        spin_lock(&gru->gs_asid_lock);
         BUG_ON((asids->mt_ctxbitmap & ctxbitmap) != ctxbitmap);
         asids->mt_ctxbitmap ^= ctxbitmap;
-        gru_dbg(grudev, "gru %x, gms %p, ctxnum 0x%d, asidmap 0x%lx\n",
-                gru->gs_gid, gms, ctxnum, gms->ms_asidmap[0]);
+        gru_dbg(grudev, "gid %d, gts %p, gms %p, ctxnum 0x%d, asidmap 0x%lx\n",
+                gru->gs_gid, gts, gms, gts->ts_ctxnum, gms->ms_asidmap[0]);
+        spin_unlock(&gru->gs_asid_lock);
         spin_unlock(&gms->ms_asid_lock);
 }
 
@@ -319,6 +326,7 @@ static struct gru_thread_state *gru_alloc_gts(struct vm_area_struct *vma,
         gts->ts_vma = vma;
         gts->ts_tlb_int_select = -1;
         gts->ts_gms = gru_register_mmu_notifier();
+        gts->ts_sizeavail = GRU_SIZEAVAIL(PAGE_SHIFT);
         if (!gts->ts_gms)
                 goto err;
 
@@ -399,7 +407,7 @@ static void gru_free_gru_context(struct gru_thread_state *gts)
         struct gru_state *gru;
 
         gru = gts->ts_gru;
-        gru_dbg(grudev, "gts %p, gru %p\n", gts, gru);
+        gru_dbg(grudev, "gts %p, gid %d\n", gts, gru->gs_gid);
 
         spin_lock(&gru->gs_lock);
         gru->gs_gts[gts->ts_ctxnum] = NULL;
@@ -408,6 +416,7 @@ static void gru_free_gru_context(struct gru_thread_state *gts)
         __clear_bit(gts->ts_ctxnum, &gru->gs_context_map);
         gts->ts_ctxnum = NULLCTX;
         gts->ts_gru = NULL;
+        gts->ts_blade = -1;
         spin_unlock(&gru->gs_lock);
 
         gts_drop(gts);
@@ -432,8 +441,8 @@ static inline long gru_copy_handle(void *d, void *s)
         return GRU_HANDLE_BYTES;
 }
 
-static void gru_prefetch_context(void *gseg, void *cb, void *cbe, unsigned long cbrmap,
-                                unsigned long length)
+static void gru_prefetch_context(void *gseg, void *cb, void *cbe,
+                                unsigned long cbrmap, unsigned long length)
 {
         int i, scr;
 
@@ -500,12 +509,12 @@ void gru_unload_context(struct gru_thread_state *gts, int savestate)
         zap_vma_ptes(gts->ts_vma, UGRUADDR(gts), GRU_GSEG_PAGESIZE);
         cch = get_cch(gru->gs_gru_base_vaddr, ctxnum);
 
+        gru_dbg(grudev, "gts %p\n", gts);
         lock_cch_handle(cch);
         if (cch_interrupt_sync(cch))
                 BUG();
-        gru_dbg(grudev, "gts %p\n", gts);
 
-        gru_unload_mm_tracker(gru, gts->ts_gms, gts->ts_ctxnum);
+        gru_unload_mm_tracker(gru, gts);
         if (savestate)
                 gru_unload_context_data(gts->ts_gdata, gru->gs_gru_base_vaddr,
                                         ctxnum, gts->ts_cbr_map,
@@ -534,7 +543,7 @@ static void gru_load_context(struct gru_thread_state *gts)
         cch = get_cch(gru->gs_gru_base_vaddr, ctxnum);
 
         lock_cch_handle(cch);
-        asid = gru_load_mm_tracker(gru, gts->ts_gms, gts->ts_ctxnum);
+        asid = gru_load_mm_tracker(gru, gts);
         cch->tfm_fault_bit_enable =
             (gts->ts_user_options == GRU_OPT_MISS_FMM_POLL
              || gts->ts_user_options == GRU_OPT_MISS_FMM_INTR);
@@ -544,7 +553,8 @@ static void gru_load_context(struct gru_thread_state *gts)
                 cch->tlb_int_select = gts->ts_tlb_int_select;
         }
         cch->tfm_done_bit_enable = 0;
-        err = cch_allocate(cch, asid, gts->ts_cbr_map, gts->ts_dsr_map);
+        err = cch_allocate(cch, asid, gts->ts_sizeavail, gts->ts_cbr_map,
+                        gts->ts_dsr_map);
         if (err) {
                 gru_dbg(grudev,
                         "err %d: cch %p, gts %p, cbr 0x%lx, dsr 0x%lx\n",
@@ -565,11 +575,12 @@ static void gru_load_context(struct gru_thread_state *gts)
 /*
  * Update fields in an active CCH:
  *      - retarget interrupts on local blade
+ *      - update sizeavail mask
  *      - force a delayed context unload by clearing the CCH asids. This
  *        forces TLB misses for new GRU instructions. The context is unloaded
  *        when the next TLB miss occurs.
  */
-static int gru_update_cch(struct gru_thread_state *gts, int int_select)
+int gru_update_cch(struct gru_thread_state *gts, int force_unload)
 {
         struct gru_context_configuration_handle *cch;
         struct gru_state *gru = gts->ts_gru;
@@ -583,9 +594,11 @@ static int gru_update_cch(struct gru_thread_state *gts, int int_select)
                         goto exit;
                 if (cch_interrupt(cch))
                         BUG();
-                if (int_select >= 0) {
-                        gts->ts_tlb_int_select = int_select;
-                        cch->tlb_int_select = int_select;
+                if (!force_unload) {
+                        for (i = 0; i < 8; i++)
+                                cch->sizeavail[i] = gts->ts_sizeavail;
+                        gts->ts_tlb_int_select = gru_cpu_fault_map_id();
+                        cch->tlb_int_select = gru_cpu_fault_map_id();
                 } else {
                         for (i = 0; i < 8; i++)
                                 cch->asid[i] = 0;
@@ -617,7 +630,7 @@ static int gru_retarget_intr(struct gru_thread_state *gts)
 
         gru_dbg(grudev, "retarget from %d to %d\n", gts->ts_tlb_int_select,
                 gru_cpu_fault_map_id());
-        return gru_update_cch(gts, gru_cpu_fault_map_id());
+        return gru_update_cch(gts, 0);
 }
 
 
@@ -688,7 +701,7 @@ static void gru_steal_context(struct gru_thread_state *gts)
                 STAT(steal_context_failed);
         }
         gru_dbg(grudev,
-                "stole gru %x, ctxnum %d from gts %p. Need cb %d, ds %d;"
+                "stole gid %d, ctxnum %d from gts %p. Need cb %d, ds %d;"
                 " avail cb %ld, ds %ld\n",
                 gru->gs_gid, ctxnum, ngts, cbr, dsr, hweight64(gru->gs_cbr_map),
                 hweight64(gru->gs_dsr_map));
@@ -727,6 +740,7 @@ again:
         }
         reserve_gru_resources(gru, gts);
         gts->ts_gru = gru;
+        gts->ts_blade = gru->gs_blade_id;
         gts->ts_ctxnum = find_first_zero_bit(&gru->gs_context_map,
                                              GRU_NUM_CCH);
         BUG_ON(gts->ts_ctxnum == GRU_NUM_CCH);
@@ -737,7 +751,7 @@ again:
 
         STAT(assign_context);
         gru_dbg(grudev,
-                "gseg %p, gts %p, gru %x, ctx %d, cbr %d, dsr %d\n",
+                "gseg %p, gts %p, gid %d, ctx %d, cbr %d, dsr %d\n",
                 gseg_virtual_address(gts->ts_gru, gts->ts_ctxnum), gts,
                 gts->ts_gru->gs_gid, gts->ts_ctxnum,
                 gts->ts_cbr_au_count, gts->ts_dsr_au_count);
@@ -773,8 +787,8 @@ int gru_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
                 return VM_FAULT_SIGBUS;
 
 again:
-        preempt_disable();
         mutex_lock(&gts->ts_ctxlock);
+        preempt_disable();
         if (gts->ts_gru) {
                 if (gts->ts_gru->gs_blade_id != uv_numa_blade_id()) {
                         STAT(migrated_nopfn_unload);
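
The substantive change in gru_load_mm_tracker()/gru_unload_mm_tracker() above is the lock nesting it establishes: the per-address-space ms_asid_lock is taken first and the per-chiplet gs_asid_lock second, on both the load and unload paths, so ASID assignment and the mt_ctxbitmap updates are serialized under a single consistent ordering. Below is a minimal sketch of that ordering only; the *_sketch types and function are illustrative stand-ins, not the driver's definitions.

#include <linux/spinlock.h>

/*
 * Stand-in types for illustration; the real fields live in gru_mm_struct
 * and gru_state in the driver headers. Both locks must be initialized
 * (spin_lock_init() or DEFINE_SPINLOCK) before use.
 */
struct gms_sketch {
        spinlock_t ms_asid_lock;        /* per address space (gms) */
};

struct gru_sketch {
        spinlock_t gs_asid_lock;        /* per GRU chiplet */
};

static void asid_tracker_update_sketch(struct gms_sketch *gms,
                                       struct gru_sketch *gru)
{
        spin_lock(&gms->ms_asid_lock);          /* outer: per-mm tracker lock */
        spin_lock(&gru->gs_asid_lock);          /* inner: per-GRU ASID lock */

        /* assign or reuse an ASID and update the context bitmap here */

        spin_unlock(&gru->gs_asid_lock);
        spin_unlock(&gms->ms_asid_lock);
}

Because the patch also drops the spin_lock()/spin_unlock() pair from gru_assign_asid() itself, that helper is now only safe to call with both locks already held, exactly as in the nested region sketched above.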