	DEFINE(PACASLBCACHE, offsetof(struct paca_struct, slb_cache));
	DEFINE(PACASLBCACHEPTR, offsetof(struct paca_struct, slb_cache_ptr));
	DEFINE(PACACONTEXTID, offsetof(struct paca_struct, context.id));
-#ifdef CONFIG_PPC_64K_PAGES
-	DEFINE(PACAPGDIR, offsetof(struct paca_struct, pgdir));
-#endif
#ifdef CONFIG_HUGETLB_PAGE
	DEFINE(PACALOWHTLBAREAS, offsetof(struct paca_struct, context.low_htlb_areas));
	DEFINE(PACAHIGHHTLBAREAS, offsetof(struct paca_struct, context.high_htlb_areas));
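These DEFINE() lines generate assembly-visible offset constants; the PACAPGDIR entry goes away with the field it described. As a rough illustration of the mechanism, here is a minimal, compilable sketch; the macro body mirrors the usual shape of the kernel's asm-offsets helper, but the struct and symbol names are invented for this example:

#include <stddef.h>

/* Emit a "->SYM value" marker into the generated assembly; a build
 * script later scrapes these markers into a header of #defines that
 * assembly code can use. (Assumed shape of the kernel helper.) */
#define DEFINE(sym, val) \
	asm volatile("\n->" #sym " %0 " #val : : "i" (val))

struct example_paca {			/* hypothetical stand-in */
	unsigned long context_id;
	unsigned short slb_cache[8];
};

int main(void)
{
	/* Compile with "cc -S" and inspect the .s output. */
	DEFINE(EX_CONTEXTID, offsetof(struct example_paca, context_id));
	DEFINE(EX_SLBCACHE, offsetof(struct example_paca, slb_cache));
	return 0;
}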
	get_paca()->slb_cache_ptr = 0;
	get_paca()->context = mm->context;
-#ifdef CONFIG_PPC_64K_PAGES
-	get_paca()->pgdir = mm->pgd;
-#endif /* CONFIG_PPC_64K_PAGES */
	/*
	 * preload some userspace segments into the SLB.
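The two surviving statements show the pattern this patch trims down: at context switch, per-mm state is mirrored into the per-CPU paca so low-level fault paths can read it without chasing current->mm. A self-contained sketch of that pattern, with all names invented for illustration:

struct mm_ctx {
	unsigned long id;		/* stands in for mm_context_t */
};

struct paca_like {
	struct mm_ctx context;		/* mirrored copy for fast paths */
	int slb_cache_ptr;		/* fill index, reset on switch */
};

static struct paca_like this_cpu_paca;

/* Invalidate the SLB cache and mirror the incoming context, as the
 * two statements kept above do. */
static void switch_slb_sketch(const struct mm_ctx *next_ctx)
{
	this_cpu_paca.slb_cache_ptr = 0;
	this_cpu_paca.context = *next_ctx;
}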
	__get_cpu_var(stab_cache_ptr) = 0;
-#ifdef CONFIG_PPC_64K_PAGES
-	get_paca()->pgdir = mm->pgd;
-#endif /* CONFIG_PPC_64K_PAGES */
-
	/* Now preload some entries for the new task */
	if (test_tsk_thread_flag(tsk, TIF_32BIT))
		unmapped_base = TASK_UNMAPPED_BASE_USER32;
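The hunk ends mid-function; for readability, the preload setup presumably continues with the symmetric 64-bit branch (an assumption about the elided code, not part of this excerpt):

	unsigned long unmapped_base;

	/* 32-bit tasks get the lower unmapped-area base. */
	if (test_tsk_thread_flag(tsk, TIF_32BIT))
		unmapped_base = TASK_UNMAPPED_BASE_USER32;
	else
		unmapped_base = TASK_UNMAPPED_BASE;	/* assumed else branch */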
 * 2 of the License, or (at your option) any later version.
 */
-/*
- * Getting into a kernel thread, there is no valid user segment, mark
- * paca->pgdir NULL so that SLB miss on user addresses will fault
- */
static inline void enter_lazy_tlb(struct mm_struct *mm,
				  struct task_struct *tsk)
{
-#ifdef CONFIG_PPC_64K_PAGES
-	get_paca()->pgdir = NULL;
-#endif /* CONFIG_PPC_64K_PAGES */
}
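With the #ifdef block removed, no work is left and the function reduces to an empty stub:

static inline void enter_lazy_tlb(struct mm_struct *mm,
				  struct task_struct *tsk)
{
}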
#define NO_CONTEXT 0
	cpu_set(smp_processor_id(), next->cpu_vm_mask);
	/* No need to flush userspace segments if the mm doesn't change */
-#ifdef CONFIG_PPC_64K_PAGES
-	if (prev == next && get_paca()->pgdir == next->pgd)
-		return;
-#else
	if (prev == next)
		return;
-#endif /* CONFIG_PPC_64K_PAGES */
#ifdef CONFIG_ALTIVEC
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
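After this hunk, the early exit in switch_mm() is the same whether or not 64K pages are configured:

	/* No need to flush userspace segments if the mm doesn't change */
	if (prev == next)
		return;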
	u64 exmc[10];			/* used for machine checks */
	u64 exslb[10];			/* used for SLB/segment table misses
					 * on the linear mapping */
-#ifdef CONFIG_PPC_64K_PAGES
-	pgd_t *pgdir;
-#endif /* CONFIG_PPC_64K_PAGES */
	mm_context_t context;
	u16 slb_cache[SLB_CACHE_ENTRIES];
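Once the field is dropped, the touched region of struct paca_struct (reconstructed from the context lines above) reads:

	u64 exmc[10];			/* used for machine checks */
	u64 exslb[10];			/* used for SLB/segment table misses
					 * on the linear mapping */
	mm_context_t context;
	u16 slb_cache[SLB_CACHE_ENTRIES];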