From: Torsten Duwe <duwe@suse.de>
Subject: Make bigmem patches configurable
Patch-mainline: never, only makes feature update switch OBS compliant
References: bsc#928138,fate#319026

*** Autogenerated by scripts/bigmem-generate-ifdef-guard -- DO NOT EDIT ! ***

diff -urp a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h
--- a/arch/powerpc/include/asm/cputable.h
+++ b/arch/powerpc/include/asm/cputable.h
@@ -405,6 +405,13 @@ extern const char *powerpc_base_platform
 #define CPU_FTRS_GENERIC_32	(CPU_FTR_COMMON | CPU_FTR_NODSISRALIGN)
 
 /* 64-bit CPUs */
+#ifndef CONFIG_BIGMEM
+#define CPU_FTRS_POWER3	(CPU_FTR_USE_TB | \
+	    CPU_FTR_IABR | CPU_FTR_PPC_LE)
+#define CPU_FTRS_RS64	(CPU_FTR_USE_TB | \
+	    CPU_FTR_IABR | \
+	    CPU_FTR_MMCRA | CPU_FTR_CTRL)
+#endif
 #define CPU_FTRS_POWER4	(CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
 	    CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
 	    CPU_FTR_MMCRA | CPU_FTR_CP_USE_DCBTZ | \
@@ -459,11 +466,19 @@ extern const char *powerpc_base_platform
 #ifdef CONFIG_PPC_BOOK3E
 #define CPU_FTRS_POSSIBLE	(CPU_FTRS_E5500 | CPU_FTRS_A2)
 #else
+#ifndef CONFIG_BIGMEM
+#define CPU_FTRS_POSSIBLE	\
+	    (CPU_FTRS_POWER3 | CPU_FTRS_RS64 | CPU_FTRS_POWER4 |	\
+	    CPU_FTRS_PPC970 | CPU_FTRS_POWER5 | CPU_FTRS_POWER6 |	\
+	    CPU_FTRS_POWER7 | CPU_FTRS_POWER8 | CPU_FTRS_CELL |		\
+	    CPU_FTRS_PA6T | CPU_FTR_VSX)
+#else
 #define CPU_FTRS_POSSIBLE \
 	    (CPU_FTRS_POWER4 | CPU_FTRS_PPC970 | CPU_FTRS_POWER5 | \
 	     CPU_FTRS_POWER6 | CPU_FTRS_POWER7 | CPU_FTRS_POWER8 | \
 	     CPU_FTRS_CELL | CPU_FTRS_PA6T | CPU_FTR_VSX)
 #endif
+#endif
 #else
 enum {
 	CPU_FTRS_POSSIBLE =
@@ -507,11 +522,18 @@ enum {
 #ifdef CONFIG_PPC_BOOK3E
 #define CPU_FTRS_ALWAYS		(CPU_FTRS_E5500 & CPU_FTRS_A2)
 #else
+#ifndef CONFIG_BIGMEM
+#define CPU_FTRS_ALWAYS		\
+	    (CPU_FTRS_POWER3 & CPU_FTRS_RS64 & CPU_FTRS_POWER4 &	\
+	    CPU_FTRS_PPC970 & CPU_FTRS_POWER5 & CPU_FTRS_POWER6 &	\
+	    CPU_FTRS_POWER7 & CPU_FTRS_CELL & CPU_FTRS_PA6T & CPU_FTRS_POSSIBLE)
+#else
 #define CPU_FTRS_ALWAYS \
 	    (CPU_FTRS_POWER4 & CPU_FTRS_PPC970 & CPU_FTRS_POWER5 & \
 	     CPU_FTRS_POWER6 & CPU_FTRS_POWER7 & CPU_FTRS_CELL & \
 	     CPU_FTRS_PA6T & CPU_FTRS_POSSIBLE)
 #endif
+#endif
 #else
 enum {
 	CPU_FTRS_ALWAYS =
diff -urp a/arch/powerpc/include/asm/kvm_book3s_32.h b/arch/powerpc/include/asm/kvm_book3s_32.h
--- a/arch/powerpc/include/asm/kvm_book3s_32.h
+++ b/arch/powerpc/include/asm/kvm_book3s_32.h
@@ -38,6 +38,8 @@ static inline struct kvmppc_book3s_shado
 #define SID_SHIFT	28
 #define ESID_MASK	0xf0000000
 #define VSID_MASK	0x00fffffff0000000ULL
+#ifdef CONFIG_BIGMEM
 #define VPN_SHIFT	12
+#endif
 
 #endif /* __ASM_KVM_BOOK3S_32_H__ */
diff -urp a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -169,7 +169,11 @@ struct hpte_cache {
 	struct hlist_node list_vpte;
 	struct hlist_node list_vpte_long;
 	struct rcu_head rcu_head;
+#ifndef CONFIG_BIGMEM
+	u64 host_va;
+#else
 	u64 host_vpn;
+#endif
 	u64 pfn;
 	ulong slot;
 	struct kvmppc_pte pte;
diff -urp a/arch/powerpc/include/asm/machdep.h b/arch/powerpc/include/asm/machdep.h
--- a/arch/powerpc/include/asm/machdep.h
+++ b/arch/powerpc/include/asm/machdep.h
@@ -33,19 +33,31 @@ struct machdep_calls {
 	char		*name;
 #ifdef CONFIG_PPC64
 	void            (*hpte_invalidate)(unsigned long slot,
+#ifndef CONFIG_BIGMEM
+					   unsigned long va,
+#else
 					   unsigned long vpn,
+#endif
 					   int psize, int ssize,
 					   int local);
 	long		(*hpte_updatepp)(unsigned long slot, 
 					 unsigned long newpp, 
+#ifndef CONFIG_BIGMEM
+					 unsigned long va,
+#else
 					 unsigned long vpn,
+#endif
 					 int psize, int ssize,
 					 int local);
 	void            (*hpte_updateboltedpp)(unsigned long newpp, 
 					       unsigned long ea,
 					       int psize, int ssize);
 	long		(*hpte_insert)(unsigned long hpte_group,
+#ifndef CONFIG_BIGMEM
+				       unsigned long va,
+#else
 				       unsigned long vpn,
+#endif
 				       unsigned long prpn,
 				       unsigned long rflags,
 				       unsigned long vflags,
diff -urp a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h
--- a/arch/powerpc/include/asm/mmu_context.h
+++ b/arch/powerpc/include/asm/mmu_context.h
@@ -18,6 +18,9 @@ extern int init_new_context(struct task_
 extern void destroy_context(struct mm_struct *mm);
 
 extern void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next);
+#ifndef CONFIG_BIGMEM
+extern void switch_stab(struct task_struct *tsk, struct mm_struct *mm);
+#endif
 extern void switch_slb(struct task_struct *tsk, struct mm_struct *mm);
 extern void set_context(unsigned long id, pgd_t *pgd);
 
@@ -78,6 +81,10 @@ static inline void switch_mm(struct mm_s
 #ifdef CONFIG_PPC_STD_MMU_64
 	if (mmu_has_feature(MMU_FTR_SLB))
 		switch_slb(tsk, next);
+#ifndef CONFIG_BIGMEM
+	else
+		switch_stab(tsk, next);
+#endif
 #else
 	/* Out of line for now */
 	switch_mmu_context(prev, next);
diff -urp a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h
--- a/arch/powerpc/include/asm/mmu.h
+++ b/arch/powerpc/include/asm/mmu.h
@@ -26,11 +26,13 @@
  * This is individual features
  */
 
+#ifdef CONFIG_BIGMEM
 /*
  * Support for 68 bit VA space. We added that from ISA 2.05
  */
 #define MMU_FTR_68_BIT_VA		ASM_CONST(0x00002000)
 
+#endif
 /* Enable use of high BAT registers */
 #define MMU_FTR_USE_HIGH_BATS		ASM_CONST(0x00010000)
 
@@ -104,9 +106,15 @@
 #define MMU_FTRS_POWER4		MMU_FTRS_DEFAULT_HPTE_ARCH_V2
 #define MMU_FTRS_PPC970		MMU_FTRS_POWER4
 #define MMU_FTRS_POWER5		MMU_FTRS_POWER4 | MMU_FTR_LOCKLESS_TLBIE
+#ifndef CONFIG_BIGMEM
+#define MMU_FTRS_POWER6		MMU_FTRS_POWER4 | MMU_FTR_LOCKLESS_TLBIE
+#define MMU_FTRS_POWER7		MMU_FTRS_POWER4 | MMU_FTR_LOCKLESS_TLBIE
+#define MMU_FTRS_POWER8		MMU_FTRS_POWER4 | MMU_FTR_LOCKLESS_TLBIE
+#else
 #define MMU_FTRS_POWER6		MMU_FTRS_POWER5 | MMU_FTR_68_BIT_VA
 #define MMU_FTRS_POWER7		MMU_FTRS_POWER6
 #define MMU_FTRS_POWER8		MMU_FTRS_POWER6
+#endif
 #define MMU_FTRS_CELL		MMU_FTRS_DEFAULT_HPTE_ARCH_V2 | \
 				MMU_FTR_CI_LARGE_PAGE
 #define MMU_FTRS_PA6T		MMU_FTRS_DEFAULT_HPTE_ARCH_V2 | \
@@ -142,6 +150,7 @@ extern void setup_initial_memory_limit(p
 extern u64 ppc64_rma_size;
 #endif /* CONFIG_PPC64 */
 
+#ifdef CONFIG_BIGMEM
 struct mm_struct;
 #ifdef CONFIG_DEBUG_VM
 extern void assert_pte_locked(struct mm_struct *mm, unsigned long addr);
@@ -151,6 +160,7 @@ static inline void assert_pte_locked(str
 }
 #endif /* !CONFIG_DEBUG_VM */
 
+#endif
 #endif /* !__ASSEMBLY__ */
 
 /* The kernel use the constants below to index in the page sizes array.
diff -urp a/arch/powerpc/include/asm/mmu-hash64.h b/arch/powerpc/include/asm/mmu-hash64.h
--- a/arch/powerpc/include/asm/mmu-hash64.h
+++ b/arch/powerpc/include/asm/mmu-hash64.h
@@ -15,12 +15,34 @@
 #include <asm/asm-compat.h>
 #include <asm/page.h>
 
+#ifndef CONFIG_BIGMEM
+/*
+ * Segment table
+ */
+
+#define STE_ESID_V	0x80
+#define STE_ESID_KS	0x20
+#define STE_ESID_KP	0x10
+#define STE_ESID_N	0x08
+
+#define STE_VSID_SHIFT	12
+
+/* Location of cpu0's segment table */
+#define STAB0_PAGE	0x8
+#define STAB0_OFFSET	(STAB0_PAGE << 12)
+#define STAB0_PHYS_ADDR	(STAB0_OFFSET + PHYSICAL_START)
+
+#ifndef __ASSEMBLY__
+extern char initial_stab[];
+#endif /* ! __ASSEMBLY */
+#else
 /*.
  * This is necessary to get the definition of PGTABLE_RANGE which we
  * need for various slices related matters. Note that this isn't the
  * complete pgtable.h but only a portion of it.
 ·*/
 #include <asm/pgtable-ppc64.h>
+#endif
 
 /*
  * SLB
@@ -35,7 +57,9 @@
 
 /* Bits in the SLB VSID word */
 #define SLB_VSID_SHIFT		12
+#ifdef CONFIG_BIGMEM
 #define SLB_VSID_SHIFT_256M	SLB_VSID_SHIFT
+#endif
 #define SLB_VSID_SHIFT_1T	24
 #define SLB_VSID_SSIZE_SHIFT	62
 #define SLB_VSID_B		ASM_CONST(0xc000000000000000)
@@ -136,6 +160,7 @@ struct mmu_psize_def
 #define MMU_SEGSIZE_256M	0
 #define MMU_SEGSIZE_1T		1
 
+#ifdef CONFIG_BIGMEM
 /*
  * encode page number shift.
  * in order to fit the 78 bit va in a 64 bit variable we shift the va by
@@ -145,9 +170,11 @@ struct mmu_psize_def
  * we work in all cases including 4k page size.
  */
 #define VPN_SHIFT	12
+#endif
 
 #ifndef __ASSEMBLY__
 
+#ifdef CONFIG_BIGMEM
 static inline int segment_shift(int ssize)
 {
 	if (ssize == MMU_SEGSIZE_256M)
@@ -155,6 +182,7 @@ static inline int segment_shift(int ssiz
 	return SID_SHIFT_1T;
 }
 
+#endif
 /*
  * The current system page and segment sizes
  */
@@ -177,6 +205,7 @@ extern unsigned long tce_alloc_start, tc
  */
 extern int mmu_ci_restrictions;
 
+#ifdef CONFIG_BIGMEM
 /*.
  * This computes the AVPN and B fields of the first dword of a HPTE,
  * for use when we want to match an existing PTE.  The bottom 7 bits
@@ -200,17 +229,31 @@ static inline unsigned long hpte_encode_
 	return v;
 }
 
+#endif
 /*
  * This function sets the AVPN and L fields of the HPTE  appropriately
  * for the page size
  */
+#ifndef CONFIG_BIGMEM
+static inline unsigned long hpte_encode_v(unsigned long va, int psize,
+					  int ssize)
+#else
 static inline unsigned long hpte_encode_v(unsigned long vpn,
 					  int psize, int ssize)
+#endif
 {
 	unsigned long v;
+#ifndef CONFIG_BIGMEM
+	v = (va >> 23) & ~(mmu_psize_defs[psize].avpnm);
+	v <<= HPTE_V_AVPN_SHIFT;
+#else
 	v = hpte_encode_avpn(vpn, psize, ssize);
+#endif
 	if (psize != MMU_PAGE_4K)
 		v |= HPTE_V_LARGE;
+#ifndef CONFIG_BIGMEM
+	v |= ((unsigned long) ssize) << HPTE_V_SSIZE_SHIFT;
+#endif
 	return v;
 }
 
@@ -235,37 +278,71 @@ static inline unsigned long hpte_encode_
 }
 
 /*
+#ifndef CONFIG_BIGMEM
+ * Build a VA given VSID, EA and segment size
+#else
  * Build a VPN_SHIFT bit shifted va given VSID, EA and segment size.
+#endif
  */
+#ifndef CONFIG_BIGMEM
+static inline unsigned long hpt_va(unsigned long ea, unsigned long vsid,
+				   int ssize)
+#else
 static inline unsigned long hpt_vpn(unsigned long ea,
 				    unsigned long vsid, int ssize)
+#endif
 {
+#ifndef CONFIG_BIGMEM
+	if (ssize == MMU_SEGSIZE_256M)
+		return (vsid << 28) | (ea & 0xfffffffUL);
+	return (vsid << 40) | (ea & 0xffffffffffUL);
+#else
 	unsigned long mask;
 	int s_shift = segment_shift(ssize);
 
 	mask = (1ul << (s_shift - VPN_SHIFT)) - 1;
 	return (vsid << (s_shift - VPN_SHIFT)) | ((ea >> VPN_SHIFT) & mask);
+#endif
 }
 
 /*
  * This hashes a virtual address
  */
+#ifndef CONFIG_BIGMEM
+
+static inline unsigned long hpt_hash(unsigned long va, unsigned int shift,
+				     int ssize)
+#else
 static inline unsigned long hpt_hash(unsigned long vpn,
 				     unsigned int shift, int ssize)
+#endif
 {
+#ifdef CONFIG_BIGMEM
 	unsigned long mask;
+#endif
 	unsigned long hash, vsid;
 
+#ifdef CONFIG_BIGMEM
 	/* VPN_SHIFT can be atmost 12 */
+#endif
 	if (ssize == MMU_SEGSIZE_256M) {
+#ifndef CONFIG_BIGMEM
+		hash = (va >> 28) ^ ((va & 0x0fffffffUL) >> shift);
+#else
 		mask = (1ul << (SID_SHIFT - VPN_SHIFT)) - 1;
 		hash = (vpn >> (SID_SHIFT - VPN_SHIFT)) ^
 			((vpn & mask) >> (shift - VPN_SHIFT));
+#endif
 	} else {
+#ifndef CONFIG_BIGMEM
+		vsid = va >> 40;
+		hash = vsid ^ (vsid << 25) ^ ((va & 0xffffffffffUL) >> shift);
+#else
 		mask = (1ul << (SID_SHIFT_1T - VPN_SHIFT)) - 1;
 		vsid = vpn >> (SID_SHIFT_1T - VPN_SHIFT);
 		hash = vsid ^ (vsid << 25) ^
 			((vpn & mask) >> (shift - VPN_SHIFT)) ;
+#endif
 	}
 	return hash & 0x7fffffffffUL;
 }
@@ -298,14 +375,32 @@ extern void hpte_init_iSeries(void);
 extern void hpte_init_beat(void);
 extern void hpte_init_beat_v3(void);
 
+#ifndef CONFIG_BIGMEM
+extern void stabs_alloc(void);
+#endif
 extern void slb_initialize(void);
 extern void slb_flush_and_rebolt(void);
+#ifndef CONFIG_BIGMEM
+extern void stab_initialize(unsigned long stab);
+#endif
 
 extern void slb_vmalloc_update(void);
 extern void slb_set_size(u16 size);
 #endif /* __ASSEMBLY__ */
 
 /*
+#ifndef CONFIG_BIGMEM
+ * VSID allocation
+ *
+ * We first generate a 36-bit "proto-VSID".  For kernel addresses this
+ * is equal to the ESID, for user addresses it is:
+ *	(context << 15) | (esid & 0x7fff)
+ *
+ * The two forms are distinguishable because the top bit is 0 for user
+ * addresses, whereas the top two bits are 1 for kernel addresses.
+ * Proto-VSIDs with the top two bits equal to 0b10 are reserved for
+ * now.
+#else
  * VSID allocation (256MB segment)
  *·
  * We first generate a 37-bit "proto-VSID". Proto-VSIDs are generated
@@ -319,15 +414,44 @@ extern void slb_set_size(u16 size);
  * 0x00002 -  [ 0xd000000000000000 - 0xd0003fffffffffff ]
  * 0x00003 -  [ 0xe000000000000000 - 0xe0003fffffffffff ]
  * 0x00004 -  [ 0xf000000000000000 - 0xf0003fffffffffff ]
+#endif
  *
  * The proto-VSIDs are then scrambled into real VSIDs with the
  * multiplicative hash:
  *
  *	VSID = (proto-VSID * VSID_MULTIPLIER) % VSID_MODULUS
+#ifndef CONFIG_BIGMEM
+ *	where	VSID_MULTIPLIER = 268435399 = 0xFFFFFC7
+ *		VSID_MODULUS = 2^36-1 = 0xFFFFFFFFF
+ *
+ * This scramble is only well defined for proto-VSIDs below
+ * 0xFFFFFFFFF, so both proto-VSID and actual VSID 0xFFFFFFFFF are
+ * reserved.  VSID_MULTIPLIER is prime, so in particular it is
+#else
  *·
  * VSID_MULTIPLIER is prime, so in particular it is
+#endif
  * co-prime to VSID_MODULUS, making this a 1:1 scrambling function.
  * Because the modulus is 2^n-1 we can compute it efficiently without
+#ifndef CONFIG_BIGMEM
+ * a divide or extra multiply (see below).
+ *
+ * This scheme has several advantages over older methods:
+ *
+ * 	- We have VSIDs allocated for every kernel address
+ * (i.e. everything above 0xC000000000000000), except the very top
+ * segment, which simplifies several things.
+ *
+ * 	- We allow for 15 significant bits of ESID and 20 bits of
+ * context for user addresses.  i.e. 8T (43 bits) of address space for
+ * up to 1M contexts (although the page table structure and context
+ * allocation will need changes to take advantage of this).
+ *
+ * 	- The scramble function gives robust scattering in the hash
+ * table (at least based on some initial results).  The previous
+ * method was more susceptible to pathological cases giving excessive
+ * hash collisions.
+#else
  * a divide or extra multiply (see below). The scramble function gives
  * robust scattering in the hash table (at least based on some initial
  * results).
@@ -339,9 +463,20 @@ extern void slb_set_size(u16 size);
  * We also need to avoid the last segment of the last context, because that
  * would give a protovsid of 0x1fffffffff. That will result in a VSID 0
  * because of the modulo operation in vsid scramble.
+#endif
  */
+#ifdef CONFIG_BIGMEM
 
+#endif
 /*
+#ifndef CONFIG_BIGMEM
+ * WARNING - If you change these you must make sure the asm
+ * implementations in slb_allocate (slb_low.S), do_stab_bolted
+ * (head.S) and ASM_VSID_SCRAMBLE (below) are changed accordingly.
+ *
+ * You'll also need to change the precomputed VSID values in head.S
+ * which are used by the iSeries firmware.
+#else
  * Max Va bits we support as of now is 68 bits. We want 19 bit
  * context ID.
  * Restrictions:
@@ -349,15 +484,29 @@ extern void slb_set_size(u16 size);
  * (47 bit effective address). We also cannot do more than 20bit PID.
  * For p4 and p5 which can only do 65 bit VA, we restrict our CONTEXT_BITS
  * to 16 bits (ie, we can only have 2^16 pids at the same time).
+#endif
  */
+#ifdef CONFIG_BIGMEM
 #define VA_BITS			68
 #define CONTEXT_BITS		19
 #define ESID_BITS		(VA_BITS - (SID_SHIFT + CONTEXT_BITS))
 #define ESID_BITS_1T		(VA_BITS - (SID_SHIFT_1T + CONTEXT_BITS))
+#endif
 
+#ifndef CONFIG_BIGMEM
+#define VSID_MULTIPLIER_256M	ASM_CONST(200730139)	/* 28-bit prime */
+#define VSID_BITS_256M		36
+#define VSID_MODULUS_256M	((1UL<<VSID_BITS_256M)-1)
+#else
 #define ESID_BITS_MASK		((1 << ESID_BITS) - 1)
 #define ESID_BITS_1T_MASK	((1 << ESID_BITS_1T) - 1)
+#endif
 
+#ifndef CONFIG_BIGMEM
+#define VSID_MULTIPLIER_1T	ASM_CONST(12538073)	/* 24-bit prime */
+#define VSID_BITS_1T		24
+#define VSID_MODULUS_1T		((1UL<<VSID_BITS_1T)-1)
+#else
 /*
  * 256MB segment
  * The proto-VSID space has 2^(CONTEX_BITS + ESID_BITS) - 1 segments
@@ -371,16 +520,41 @@ extern void slb_set_size(u16 size);
  */
 #define MAX_USER_CONTEXT	((ASM_CONST(1) << CONTEXT_BITS) - 2)
 #define MIN_USER_CONTEXT	(5)
+#endif
 
+#ifndef CONFIG_BIGMEM
+#define CONTEXT_BITS		19
+#define USER_ESID_BITS		16
+#define USER_ESID_BITS_1T	4
+#else
 /* Would be nice to use KERNEL_REGION_ID here */
 #define KERNEL_REGION_CONTEXT_OFFSET	(0xc - 1)
+#endif
 
+#ifndef CONFIG_BIGMEM
+#define USER_VSID_RANGE	(1UL << (USER_ESID_BITS + SID_SHIFT))
+#else
 /*
  * For platforms that support on 65bit VA we limit the context bits
  */
 #define MAX_USER_CONTEXT_65BIT_VA ((ASM_CONST(1) << (65 - (SID_SHIFT + ESID_BITS))) - 2)
+#endif
 
 /*
+#ifndef CONFIG_BIGMEM
+ * This macro generates asm code to compute the VSID scramble
+ * function.  Used in slb_allocate() and do_stab_bolted.  The function
+ * computed is: (protovsid*VSID_MULTIPLIER) % VSID_MODULUS
+ *
+ *	rt = register continaing the proto-VSID and into which the
+ *		VSID will be stored
+ *	rx = scratch register (clobbered)
+ *
+ * 	- rt and rx must be different registers
+ * 	- The answer will end up in the low VSID_BITS bits of rt.  The higher
+ * 	  bits may contain other garbage, so you may need to mask the
+ * 	  result.
+#else
  * This should be computed such that protovosid * vsid_mulitplier
  * doesn't overflow 64 bits. The vsid_mutliplier should also be
  * co-prime to vsid_modulus. We also need to make sure that number
@@ -404,7 +578,27 @@ extern void slb_set_size(u16 size);
  * | 256MB |         24 |                   40 |         64 |                 80 |
  * |-------+------------+----------------------+------------+--------------------|
  *·
+#endif
  */
+#ifndef CONFIG_BIGMEM
+#define ASM_VSID_SCRAMBLE(rt, rx, size)					\
+	lis	rx,VSID_MULTIPLIER_##size@h;				\
+	ori	rx,rx,VSID_MULTIPLIER_##size@l;				\
+	mulld	rt,rt,rx;		/* rt = rt * MULTIPLIER */	\
+									\
+	srdi	rx,rt,VSID_BITS_##size;					\
+	clrldi	rt,rt,(64-VSID_BITS_##size);				\
+	add	rt,rt,rx;		/* add high and low bits */	\
+	/* Now, r3 == VSID (mod 2^36-1), and lies between 0 and		\
+	 * 2^36-1+2^28-1.  That in particular means that if r3 >=	\
+	 * 2^36-1, then r3+1 has the 2^36 bit set.  So, if r3+1 has	\
+	 * the bit clear, r3 already has the answer we want, if it	\
+	 * doesn't, the answer is the low 36 bits of r3+1.  So in all	\
+	 * cases the answer is the low 36 bits of (r3 + ((r3+1) >> 36))*/\
+	addi	rx,rt,1;						\
+	srdi	rx,rx,VSID_BITS_##size;	/* extract 2^VSID_BITS bit */	\
+	add	rt,rt,rx
+#else
 #define VSID_MULTIPLIER_256M	ASM_CONST(12538073)	/* 24-bit prime */
 #define VSID_BITS_256M		(VA_BITS - SID_SHIFT)
 #define VSID_BITS_65_256M	(65 - SID_SHIFT)
@@ -412,11 +606,14 @@ extern void slb_set_size(u16 size);
 #define VSID_MULTIPLIER_1T	ASM_CONST(12538073)	/* 24-bit prime */
 #define VSID_BITS_1T		(VA_BITS - SID_SHIFT_1T)
 #define VSID_BITS_65_1T		(65 - SID_SHIFT_1T)
+#endif
 
+#ifdef CONFIG_BIGMEM
 #define USER_VSID_RANGE	(1UL << (ESID_BITS + SID_SHIFT))
 
 /* 4 bits per slice and we have one slice per 1TB */
 #define SLICE_ARRAY_SIZE  (PGTABLE_RANGE >> 41)
+#endif
 
 #ifndef __ASSEMBLY__
 
@@ -461,7 +658,11 @@ typedef struct {
 
 #ifdef CONFIG_PPC_MM_SLICES
 	u64 low_slices_psize;	/* SLB page size encodings */
+#ifndef CONFIG_BIGMEM
+	u64 high_slices_psize;  /* 4 bits per slice for now */
+#else
 	unsigned char high_slices_psize[SLICE_ARRAY_SIZE];
+#endif
 #else
 	u16 sllp;		/* SLB page size encoding */
 #endif
@@ -476,9 +677,40 @@ typedef struct {
 #endif /* CONFIG_PPC_ICSWX */
 } mm_context_t;
 
+#ifndef CONFIG_BIGMEM
+
+#if 0
+/*
+ * The code below is equivalent to this function for arguments
+ * < 2^VSID_BITS, which is all this should ever be called
+ * with.  However gcc is not clever enough to compute the
+ * modulus (2^n-1) without a second multiply.
+ */
+#define vsid_scramble(protovsid, size) \
+	((((protovsid) * VSID_MULTIPLIER_##size) % VSID_MODULUS_##size))
+
+#else /* 1 */
+#define vsid_scramble(protovsid, size) \
+	({								 \
+		unsigned long x;					 \
+		x = (protovsid) * VSID_MULTIPLIER_##size;		 \
+		x = (x >> VSID_BITS_##size) + (x & VSID_MODULUS_##size); \
+		(x + ((x+1) >> VSID_BITS_##size)) & VSID_MODULUS_##size; \
+	})
+#endif /* 1 */
+
+/* This is only valid for addresses >= PAGE_OFFSET */
+static inline unsigned long get_kernel_vsid(unsigned long ea, int ssize)
+#else
 static inline unsigned long vsid_scramble(unsigned long protovsid,
 				  unsigned long vsid_multiplier, int vsid_bits)
+#endif
 {
+#ifndef CONFIG_BIGMEM
+	if (ssize == MMU_SEGSIZE_256M)
+		return vsid_scramble(ea >> SID_SHIFT, 256M);
+	return vsid_scramble(ea >> SID_SHIFT_1T, 1T);
+#else
 	unsigned long vsid;
 	unsigned long vsid_modulus = ((1UL << vsid_bits) - 1);
 	/*
@@ -487,6 +719,7 @@ static inline unsigned long vsid_scrambl
 	vsid = protovsid * vsid_multiplier;
 	vsid = (vsid >> vsid_bits) + (vsid & vsid_modulus);
 	return (vsid + ((vsid + 1) >> vsid_bits)) & vsid_modulus;
+#endif
 }
 
 /* Returns the segment size indicator for a user address */
@@ -498,9 +731,19 @@ static inline int user_segment_size(unsi
 	return MMU_SEGSIZE_256M;
 }
 
+#ifndef CONFIG_BIGMEM
+/* This is only valid for user addresses (which are below 2^44) */
+#endif
 static inline unsigned long get_vsid(unsigned long context, unsigned long ea,
 				     int ssize)
 {
+#ifndef CONFIG_BIGMEM
+	if (ssize == MMU_SEGSIZE_256M)
+		return vsid_scramble((context << USER_ESID_BITS)
+				     | (ea >> SID_SHIFT), 256M);
+	return vsid_scramble((context << USER_ESID_BITS_1T)
+			     | (ea >> SID_SHIFT_1T), 1T);
+#else
 	unsigned long va_bits = VA_BITS;
 	unsigned long vsid_bits;
 	unsigned long protovsid;
@@ -525,17 +768,21 @@ static inline unsigned long get_vsid(uns
 	protovsid = (context << ESID_BITS_1T) |
 		((ea >> SID_SHIFT_1T) & ESID_BITS_1T_MASK);
 	return vsid_scramble(protovsid, VSID_MULTIPLIER_1T, vsid_bits);
+#endif
 }
 
 /*
  * This is only used on legacy iSeries in lparmap.c,
  * hence the 256MB segment assumption.
  */
+#ifdef CONFIG_BIGMEM
 #define VSID_MODULUS_256M      ((1UL<<VSID_BITS_256M)-1)
+#endif
 #define VSID_SCRAMBLE(pvsid)	(((pvsid) * VSID_MULTIPLIER_256M) %	\
 				 VSID_MODULUS_256M)
 #define KERNEL_VSID(ea)		VSID_SCRAMBLE(GET_ESID(ea))
 
+#ifdef CONFIG_BIGMEM
 /*
  * This is only valid for addresses >= PAGE_OFFSET
  */
@@ -562,6 +809,7 @@ static inline unsigned long get_kernel_v
 
 	return get_vsid(context, ea, ssize);
 }
+#endif
 #endif /* __ASSEMBLY__ */
 
 #endif /* _ASM_POWERPC_MMU_HASH64_H_ */
diff -urp a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h
--- a/arch/powerpc/include/asm/paca.h
+++ b/arch/powerpc/include/asm/paca.h
@@ -73,6 +73,12 @@ struct paca_struct {
 	u64 kernel_toc;			/* Kernel TOC address */
 	u64 kernelbase;			/* Base address of kernel */
 	u64 kernel_msr;			/* MSR while running in kernel */
+#ifndef CONFIG_BIGMEM
+#ifdef CONFIG_PPC_STD_MMU_64
+	u64 stab_real;			/* Absolute address of segment table */
+	u64 stab_addr;			/* Virtual address of segment table */
+#endif /* CONFIG_PPC_STD_MMU_64 */
+#endif
 	void *emergency_sp;		/* pointer to emergency stack */
 	u64 data_offset;		/* per cpu data offset */
 	s16 hw_cpu_id;			/* Physical processor number */
@@ -98,7 +104,11 @@ struct paca_struct {
 	/* SLB related definitions */
 	u16 vmalloc_sllp;
 	u16 slb_cache_ptr;
+#ifndef CONFIG_BIGMEM
+	u16 slb_cache[SLB_CACHE_ENTRIES];
+#else
 	u32 slb_cache[SLB_CACHE_ENTRIES];
+#endif
 #endif /* CONFIG_PPC_STD_MMU_64 */
 
 #ifdef CONFIG_PPC_BOOK3E
Only in a/arch/powerpc/include/asm: paca.h.orig
diff -urp a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h
--- a/arch/powerpc/include/asm/page_64.h
+++ b/arch/powerpc/include/asm/page_64.h
@@ -90,6 +90,14 @@ extern unsigned int HPAGE_SHIFT;
 #define GET_HIGH_SLICE_INDEX(addr)	((addr) >> SLICE_HIGH_SHIFT)
 
 #ifndef __ASSEMBLY__
+#ifndef CONFIG_BIGMEM
+
+struct slice_mask {
+	u16 low_slices;
+	u16 high_slices;
+};
+
+#endif
 struct mm_struct;
 
 extern unsigned long slice_get_unmapped_area(unsigned long addr,
Only in a/arch/powerpc/include/asm: page_64.h.orig
diff -urp a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
--- a/arch/powerpc/include/asm/pgtable.h
+++ b/arch/powerpc/include/asm/pgtable.h
@@ -9,6 +9,16 @@
 
 struct mm_struct;
 
+#ifndef CONFIG_BIGMEM
+#ifdef CONFIG_DEBUG_VM
+extern void assert_pte_locked(struct mm_struct *mm, unsigned long addr);
+#else /* CONFIG_DEBUG_VM */
+static inline void assert_pte_locked(struct mm_struct *mm, unsigned long addr)
+{
+}
+#endif /* !CONFIG_DEBUG_VM */
+
+#endif
 #endif /* !__ASSEMBLY__ */
 
 #if defined(CONFIG_PPC64)
@@ -19,8 +29,10 @@ struct mm_struct;
 
 #ifndef __ASSEMBLY__
 
+#ifdef CONFIG_BIGMEM
 #include <asm/tlbflush.h>
 
+#endif
 /* Generic accessors to PTE bits */
 static inline int pte_write(pte_t pte)		{ return pte_val(pte) & _PAGE_RW; }
 static inline int pte_dirty(pte_t pte)		{ return pte_val(pte) & _PAGE_DIRTY; }
diff -urp a/arch/powerpc/include/asm/pgtable-ppc64-4k.h b/arch/powerpc/include/asm/pgtable-ppc64-4k.h
--- a/arch/powerpc/include/asm/pgtable-ppc64-4k.h
+++ b/arch/powerpc/include/asm/pgtable-ppc64-4k.h
@@ -7,7 +7,11 @@
  */
 #define PTE_INDEX_SIZE  9
 #define PMD_INDEX_SIZE  7
+#ifndef CONFIG_BIGMEM
+#define PUD_INDEX_SIZE  7
+#else
 #define PUD_INDEX_SIZE  9
+#endif
 #define PGD_INDEX_SIZE  9
 
 #ifndef __ASSEMBLY__
diff -urp a/arch/powerpc/include/asm/pgtable-ppc64-64k.h b/arch/powerpc/include/asm/pgtable-ppc64-64k.h
--- a/arch/powerpc/include/asm/pgtable-ppc64-64k.h
+++ b/arch/powerpc/include/asm/pgtable-ppc64-64k.h
@@ -7,7 +7,11 @@
 #define PTE_INDEX_SIZE  12
 #define PMD_INDEX_SIZE  12
 #define PUD_INDEX_SIZE	0
+#ifndef CONFIG_BIGMEM
+#define PGD_INDEX_SIZE  4
+#else
 #define PGD_INDEX_SIZE  9
+#endif
 
 #ifndef __ASSEMBLY__
 #define PTE_TABLE_SIZE	(sizeof(real_pte_t) << PTE_INDEX_SIZE)
diff -urp a/arch/powerpc/include/asm/pgtable-ppc64.h b/arch/powerpc/include/asm/pgtable-ppc64.h
--- a/arch/powerpc/include/asm/pgtable-ppc64.h
+++ b/arch/powerpc/include/asm/pgtable-ppc64.h
@@ -21,6 +21,19 @@
 #define PGTABLE_RANGE (ASM_CONST(1) << PGTABLE_EADDR_SIZE)
 
 
+#ifndef CONFIG_BIGMEM
+/* Some sanity checking */
+#if TASK_SIZE_USER64 > PGTABLE_RANGE
+#error TASK_SIZE_USER64 exceeds pagetable range
+#endif
+
+#ifdef CONFIG_PPC_STD_MMU_64
+#if TASK_SIZE_USER64 > (1UL << (USER_ESID_BITS + SID_SHIFT))
+#error TASK_SIZE_USER64 exceeds user VSID range
+#endif
+#endif
+
+#endif
 /*
  * Define the address range of the kernel non-linear virtual area
  */
@@ -30,7 +43,11 @@
 #else
 #define KERN_VIRT_START ASM_CONST(0xD000000000000000)
 #endif
+#ifndef CONFIG_BIGMEM
+#define KERN_VIRT_SIZE	PGTABLE_RANGE
+#else
 #define KERN_VIRT_SIZE	ASM_CONST(0x0000100000000000)
+#endif
 
 /*
  * The vmalloc space starts at the beginning of that region, and
@@ -106,6 +123,11 @@
 
 #ifndef __ASSEMBLY__
 
+#ifndef CONFIG_BIGMEM
+#include <linux/stddef.h>
+#include <asm/tlbflush.h>
+
+#endif
 /*
  * This is the default implementation of various PTE accessors, it's
  * used in all cases except Book3S with 64K pages where we have a
@@ -184,8 +206,12 @@
 /* to find an entry in a kernel page-table-directory */
 /* This now only contains the vmalloc pages */
 #define pgd_offset_k(address) pgd_offset(&init_mm, address)
+#ifndef CONFIG_BIGMEM
+
+#else
 extern void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
 			    pte_t *ptep, unsigned long pte, int huge);
+#endif
 
 /* Atomic PTE updates */
 static inline unsigned long pte_update(struct mm_struct *mm,
@@ -329,6 +355,13 @@ static inline void __ptep_set_access_fla
 	printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
 
 /* Encode and de-code a swap entry */
+#ifndef CONFIG_BIGMEM
+#define __swp_type(entry)	(((entry).val >> 1) & 0x3f)
+#define __swp_offset(entry)	((entry).val >> 8)
+#define __swp_entry(type, offset) ((swp_entry_t){((type)<< 1)|((offset)<<8)})
+#define __pte_to_swp_entry(pte)	((swp_entry_t){pte_val(pte) >> PTE_RPN_SHIFT})
+#define __swp_entry_to_pte(x)	((pte_t) { (x).val << PTE_RPN_SHIFT })
+#else
 #define MAX_SWAPFILES_CHECK() do { \
 	BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > SWP_TYPE_BITS); \
 	/*							\
@@ -350,6 +383,7 @@ static inline void __ptep_set_access_fla
 
 #define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val((pte)) })
 #define __swp_entry_to_pte(x)		__pte((x).val)
+#endif
 #define pte_to_pgoff(pte)	(pte_val(pte) >> PTE_RPN_SHIFT)
 #define pgoff_to_pte(off)	((pte_t) {((off) << PTE_RPN_SHIFT)|_PAGE_FILE})
 #define PTE_FILE_MAX_BITS	(BITS_PER_LONG - PTE_RPN_SHIFT)
diff -urp a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h
--- a/arch/powerpc/include/asm/processor.h
+++ b/arch/powerpc/include/asm/processor.h
@@ -109,6 +109,10 @@ extern struct task_struct *last_task_use
 #endif
 
 #ifdef CONFIG_PPC64
+#ifndef CONFIG_BIGMEM
+/* 64-bit user address space is 44-bits (16TB user VM) */
+#define TASK_SIZE_USER64 (0x0000100000000000UL)
+#else
 /*
  * 64-bit user address space can have multiple limits
  * For now supported values are:
@@ -125,6 +129,7 @@ extern struct task_struct *last_task_use
 #else
 #define TASK_SIZE_USER64 TASK_SIZE_64TB
 #endif
+#endif
 
 /* 
  * 32-bit user address space is 4GB - 1 page 
diff -urp a/arch/powerpc/include/asm/pte-book3e.h b/arch/powerpc/include/asm/pte-book3e.h
--- a/arch/powerpc/include/asm/pte-book3e.h
+++ b/arch/powerpc/include/asm/pte-book3e.h
@@ -12,7 +12,9 @@
 #define _PAGE_PRESENT	0x000001 /* software: pte contains a translation */
 #define _PAGE_FILE	0x000002 /* (!present only) software: pte holds file offset */
 #define _PAGE_SW1	0x000002
+#ifdef CONFIG_BIGMEM
 #define _PAGE_BIT_SWAP_TYPE	2
+#endif
 #define _PAGE_BAP_SR	0x000004
 #define _PAGE_BAP_UR	0x000008
 #define _PAGE_BAP_SW	0x000010
diff -urp a/arch/powerpc/include/asm/pte-hash64-64k.h b/arch/powerpc/include/asm/pte-hash64-64k.h
--- a/arch/powerpc/include/asm/pte-hash64-64k.h
+++ b/arch/powerpc/include/asm/pte-hash64-64k.h
@@ -58,6 +58,16 @@
 /* Trick: we set __end to va + 64k, which happens works for
  * a 16M page as well as we want only one iteration
  */
+#ifndef CONFIG_BIGMEM
+#define pte_iterate_hashed_subpages(rpte, psize, va, index, shift)	    \
+        do {                                                                \
+                unsigned long __end = va + PAGE_SIZE;                       \
+                unsigned __split = (psize == MMU_PAGE_4K ||                 \
+				    psize == MMU_PAGE_64K_AP);              \
+                shift = mmu_psize_defs[psize].shift;                        \
+		for (index = 0; va < __end; index++, va += (1L << shift)) { \
+		        if (!__split || __rpte_sub_valid(rpte, index)) do {
+#else
 #define pte_iterate_hashed_subpages(rpte, psize, vpn, index, shift)	\
 	do {								\
 		unsigned long __end = vpn + (1UL << (PAGE_SHIFT - VPN_SHIFT));	\
@@ -68,6 +78,7 @@
 			     vpn += (1L << (shift - VPN_SHIFT))) {	\
 			if (!__split || __rpte_sub_valid(rpte, index))	\
 				do {
+#endif
 
 #define pte_iterate_hashed_end() } while(0); } } while(0)
 
diff -urp a/arch/powerpc/include/asm/pte-hash64.h b/arch/powerpc/include/asm/pte-hash64.h
--- a/arch/powerpc/include/asm/pte-hash64.h
+++ b/arch/powerpc/include/asm/pte-hash64.h
@@ -17,7 +17,9 @@
 #define _PAGE_PRESENT		0x0001 /* software: pte contains a translation */
 #define _PAGE_USER		0x0002 /* matches one of the PP bits */
 #define _PAGE_FILE		0x0002 /* (!present only) software: pte holds file offset */
+#ifdef CONFIG_BIGMEM
 #define _PAGE_BIT_SWAP_TYPE	2
+#endif
 #define _PAGE_EXEC		0x0004 /* No execute on POWER4 and newer (we invert) */
 #define _PAGE_GUARDED		0x0008
 #define _PAGE_COHERENT		0x0010 /* M: enforce memory coherence (SMP systems) */
diff -urp a/arch/powerpc/include/asm/sparsemem.h b/arch/powerpc/include/asm/sparsemem.h
--- a/arch/powerpc/include/asm/sparsemem.h
+++ b/arch/powerpc/include/asm/sparsemem.h
@@ -10,8 +10,13 @@
  */
 #define SECTION_SIZE_BITS       24
 
+#ifndef CONFIG_BIGMEM
+#define MAX_PHYSADDR_BITS       44
+#define MAX_PHYSMEM_BITS        44
+#else
 #define MAX_PHYSADDR_BITS       46
 #define MAX_PHYSMEM_BITS        46
+#endif
 
 #endif /* CONFIG_SPARSEMEM */
 
diff -urp a/arch/powerpc/include/asm/tlbflush.h b/arch/powerpc/include/asm/tlbflush.h
--- a/arch/powerpc/include/asm/tlbflush.h
+++ b/arch/powerpc/include/asm/tlbflush.h
@@ -95,7 +95,11 @@ struct ppc64_tlb_batch {
 	unsigned long		index;
 	struct mm_struct	*mm;
 	real_pte_t		pte[PPC64_TLB_BATCH_NR];
+#ifndef CONFIG_BIGMEM
+	unsigned long		vaddr[PPC64_TLB_BATCH_NR];
+#else
 	unsigned long		vpn[PPC64_TLB_BATCH_NR];
+#endif
 	unsigned int		psize;
 	int			ssize;
 };
@@ -103,6 +107,11 @@ DECLARE_PER_CPU(struct ppc64_tlb_batch,
 
 extern void __flush_tlb_pending(struct ppc64_tlb_batch *batch);
 
+#ifndef CONFIG_BIGMEM
+extern void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
+			    pte_t *ptep, unsigned long pte, int huge);
+
+#endif
 #define __HAVE_ARCH_ENTER_LAZY_MMU_MODE
 
 static inline void arch_enter_lazy_mmu_mode(void)
@@ -124,7 +133,11 @@ static inline void arch_leave_lazy_mmu_m
 #define arch_flush_lazy_mmu_mode()      do {} while (0)
 
 
+#ifndef CONFIG_BIGMEM
+extern void flush_hash_page(unsigned long va, real_pte_t pte, int psize,
+#else
 extern void flush_hash_page(unsigned long vpn, real_pte_t pte, int psize,
+#endif
 			    int ssize, int local);
 extern void flush_hash_range(unsigned long number, int local);
 
diff -urp a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -172,6 +172,10 @@ int main(void)
 #endif /* CONFIG_PPC_BOOK3E */
 
 #ifdef CONFIG_PPC_STD_MMU_64
+#ifndef CONFIG_BIGMEM
+	DEFINE(PACASTABREAL, offsetof(struct paca_struct, stab_real));
+	DEFINE(PACASTABVIRT, offsetof(struct paca_struct, stab_addr));
+#endif
 	DEFINE(PACASLBCACHE, offsetof(struct paca_struct, slb_cache));
 	DEFINE(PACASLBCACHEPTR, offsetof(struct paca_struct, slb_cache_ptr));
 	DEFINE(PACAVMALLOCSLLP, offsetof(struct paca_struct, vmalloc_sllp));
diff -urp a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -112,6 +112,98 @@ extern void __restore_cpu_e5500(void);
 
 static struct cpu_spec __initdata cpu_specs[] = {
 #ifdef CONFIG_PPC_BOOK3S_64
+#ifndef CONFIG_BIGMEM
+	{	/* Power3 */
+		.pvr_mask		= 0xffff0000,
+		.pvr_value		= 0x00400000,
+		.cpu_name		= "POWER3 (630)",
+		.cpu_features		= CPU_FTRS_POWER3,
+		.cpu_user_features	= COMMON_USER_PPC64|PPC_FEATURE_PPC_LE,
+		.mmu_features		= MMU_FTR_HPTE_TABLE,
+		.icache_bsize		= 128,
+		.dcache_bsize		= 128,
+		.num_pmcs		= 8,
+		.pmc_type		= PPC_PMC_IBM,
+		.oprofile_cpu_type	= "ppc64/power3",
+		.oprofile_type		= PPC_OPROFILE_RS64,
+		.platform		= "power3",
+	},
+	{	/* Power3+ */
+		.pvr_mask		= 0xffff0000,
+		.pvr_value		= 0x00410000,
+		.cpu_name		= "POWER3 (630+)",
+		.cpu_features		= CPU_FTRS_POWER3,
+		.cpu_user_features	= COMMON_USER_PPC64|PPC_FEATURE_PPC_LE,
+		.mmu_features		= MMU_FTR_HPTE_TABLE,
+		.icache_bsize		= 128,
+		.dcache_bsize		= 128,
+		.num_pmcs		= 8,
+		.pmc_type		= PPC_PMC_IBM,
+		.oprofile_cpu_type	= "ppc64/power3",
+		.oprofile_type		= PPC_OPROFILE_RS64,
+		.platform		= "power3",
+	},
+	{	/* Northstar */
+		.pvr_mask		= 0xffff0000,
+		.pvr_value		= 0x00330000,
+		.cpu_name		= "RS64-II (northstar)",
+		.cpu_features		= CPU_FTRS_RS64,
+		.cpu_user_features	= COMMON_USER_PPC64,
+		.mmu_features		= MMU_FTR_HPTE_TABLE,
+		.icache_bsize		= 128,
+		.dcache_bsize		= 128,
+		.num_pmcs		= 8,
+		.pmc_type		= PPC_PMC_IBM,
+		.oprofile_cpu_type	= "ppc64/rs64",
+		.oprofile_type		= PPC_OPROFILE_RS64,
+		.platform		= "rs64",
+	},
+	{	/* Pulsar */
+		.pvr_mask		= 0xffff0000,
+		.pvr_value		= 0x00340000,
+		.cpu_name		= "RS64-III (pulsar)",
+		.cpu_features		= CPU_FTRS_RS64,
+		.cpu_user_features	= COMMON_USER_PPC64,
+		.mmu_features		= MMU_FTR_HPTE_TABLE,
+		.icache_bsize		= 128,
+		.dcache_bsize		= 128,
+		.num_pmcs		= 8,
+		.pmc_type		= PPC_PMC_IBM,
+		.oprofile_cpu_type	= "ppc64/rs64",
+		.oprofile_type		= PPC_OPROFILE_RS64,
+		.platform		= "rs64",
+	},
+	{	/* I-star */
+		.pvr_mask		= 0xffff0000,
+		.pvr_value		= 0x00360000,
+		.cpu_name		= "RS64-III (icestar)",
+		.cpu_features		= CPU_FTRS_RS64,
+		.cpu_user_features	= COMMON_USER_PPC64,
+		.mmu_features		= MMU_FTR_HPTE_TABLE,
+		.icache_bsize		= 128,
+		.dcache_bsize		= 128,
+		.num_pmcs		= 8,
+		.pmc_type		= PPC_PMC_IBM,
+		.oprofile_cpu_type	= "ppc64/rs64",
+		.oprofile_type		= PPC_OPROFILE_RS64,
+		.platform		= "rs64",
+	},
+	{	/* S-star */
+		.pvr_mask		= 0xffff0000,
+		.pvr_value		= 0x00370000,
+		.cpu_name		= "RS64-IV (sstar)",
+		.cpu_features		= CPU_FTRS_RS64,
+		.cpu_user_features	= COMMON_USER_PPC64,
+		.mmu_features		= MMU_FTR_HPTE_TABLE,
+		.icache_bsize		= 128,
+		.dcache_bsize		= 128,
+		.num_pmcs		= 8,
+		.pmc_type		= PPC_PMC_IBM,
+		.oprofile_cpu_type	= "ppc64/rs64",
+		.oprofile_type		= PPC_OPROFILE_RS64,
+		.platform		= "rs64",
+	},
+#endif
 	{	/* Power4 */
 		.pvr_mask		= 0xffff0000,
 		.pvr_value		= 0x00350000,
Only in a/arch/powerpc/kernel: cputable.c.orig
diff -urp a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -80,7 +80,34 @@ data_access_pSeries:
 	HMT_MEDIUM_PPR_DISCARD
 	DO_KVM	0x300
 	SET_SCRATCH0(r13)
+#ifndef CONFIG_BIGMEM
+BEGIN_FTR_SECTION
+	GET_PACA(r13)
+	std	r9,PACA_EXSLB+EX_R9(r13)
+	HMT_MEDIUM_PPR_SAVE(PACA_EXGEN, r9)
+	std	r10,PACA_EXSLB+EX_R10(r13)
+	mfspr	r10,SPRN_DAR
+	mfspr	r9,SPRN_DSISR
+	srdi	r10,r10,60
+	rlwimi	r10,r9,16,0x20
+	mfcr	r9
+	cmpwi	r10,0x2c
+	beq	do_stab_bolted_pSeries
+	ld	r10,PACA_EXSLB+EX_R10(r13)
+	std	r11,PACA_EXGEN+EX_R11(r13)
+	ld	r11,PACA_EXSLB+EX_R9(r13)
+	std	r12,PACA_EXGEN+EX_R12(r13)
+	GET_SCRATCH0(r12)
+	std	r10,PACA_EXGEN+EX_R10(r13)
+	std	r11,PACA_EXGEN+EX_R9(r13)
+	std	r12,PACA_EXGEN+EX_R13(r13)
+	EXCEPTION_PROLOG_PSERIES_1(data_access_common, EXC_STD)
+FTR_SECTION_ELSE
+#endif
 	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, data_access_common, EXC_STD)
+#ifndef CONFIG_BIGMEM
+ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_SLB)
+#endif
 
 	. = 0x380
 	.globl data_access_slb_pSeries
@@ -301,6 +328,16 @@ masked_Hinterrupt:
 	hrfid
 	b	.
 
+#ifndef CONFIG_BIGMEM
+	.align	7
+do_stab_bolted_pSeries:
+	std	r11,PACA_EXSLB+EX_R11(r13)
+	std	r12,PACA_EXSLB+EX_R12(r13)
+	GET_SCRATCH0(r10)
+	std	r10,PACA_EXSLB+EX_R13(r13)
+	EXCEPTION_PROLOG_PSERIES_1(.do_stab_bolted, EXC_STD)
+
+#endif
 #ifdef CONFIG_PPC_PSERIES
 /*
  * Vectors for the FWNMI option.  Share common code.
@@ -837,6 +874,14 @@ _STATIC(do_hash_page)
 	bne-	handle_page_fault	/* if not, try to insert a HPTE */
 	andis.  r0,r4,DSISR_DABRMATCH@h
 	bne-    handle_dabr_fault
+#ifndef CONFIG_BIGMEM
+
+BEGIN_FTR_SECTION
+	andis.	r0,r4,0x0020		/* Is it a segment table fault? */
+	bne-	do_ste_alloc		/* If so handle it */
+END_MMU_FTR_SECTION_IFCLR(MMU_FTR_SLB)
+
+#endif
 	clrrdi	r11,r1,THREAD_SHIFT
 	lwz	r0,TI_PREEMPT(r11)	/* If we're in an "NMI" */
 	andis.	r0,r0,NMI_MASK@h	/* (i.e. an irq when soft-disabled) */
@@ -969,6 +1014,101 @@ handle_page_fault:
 	bl	.bad_page_fault
 	b	.ret_from_except
 
+#ifndef CONFIG_BIGMEM
+	/* here we have a segment miss */
+do_ste_alloc:
+	bl	.ste_allocate		/* try to insert stab entry */
+	cmpdi	r3,0
+	bne-	handle_page_fault
+	b	fast_exception_return
+
+/*
+ * r13 points to the PACA, r9 contains the saved CR,
+ * r11 and r12 contain the saved SRR0 and SRR1.
+ * r9 - r13 are saved in paca->exslb.
+ * We assume we aren't going to take any exceptions during this procedure.
+ * We assume (DAR >> 60) == 0xc.
+ */
+	.align	7
+_GLOBAL(do_stab_bolted)
+	stw	r9,PACA_EXSLB+EX_CCR(r13)	/* save CR in exc. frame */
+	std	r11,PACA_EXSLB+EX_SRR0(r13)	/* save SRR0 in exc. frame */
+
+	/* Hash to the primary group */
+	ld	r10,PACASTABVIRT(r13)
+	mfspr	r11,SPRN_DAR
+	srdi	r11,r11,28
+	rldimi	r10,r11,7,52	/* r10 = first ste of the group */
+
+	/* Calculate VSID */
+	/* This is a kernel address, so protovsid = ESID */
+	ASM_VSID_SCRAMBLE(r11, r9, 256M)
+	rldic	r9,r11,12,16	/* r9 = vsid << 12 */
+
+	/* Search the primary group for a free entry */
+1:	ld	r11,0(r10)	/* Test valid bit of the current ste	*/
+	andi.	r11,r11,0x80
+	beq	2f
+	addi	r10,r10,16
+	andi.	r11,r10,0x70
+	bne	1b
+
+	/* Stick for only searching the primary group for now.		*/
+	/* At least for now, we use a very simple random castout scheme */
+	/* Use the TB as a random number ;  OR in 1 to avoid entry 0	*/
+	mftb	r11
+	rldic	r11,r11,4,57	/* r11 = (r11 << 4) & 0x70 */
+	ori	r11,r11,0x10
+
+	/* r10 currently points to an ste one past the group of interest */
+	/* make it point to the randomly selected entry			*/
+	subi	r10,r10,128
+	or 	r10,r10,r11	/* r10 is the entry to invalidate	*/
+
+	isync			/* mark the entry invalid		*/
+	ld	r11,0(r10)
+	rldicl	r11,r11,56,1	/* clear the valid bit */
+	rotldi	r11,r11,8
+	std	r11,0(r10)
+	sync
+
+	clrrdi	r11,r11,28	/* Get the esid part of the ste		*/
+	slbie	r11
+
+2:	std	r9,8(r10)	/* Store the vsid part of the ste	*/
+	eieio
+
+	mfspr	r11,SPRN_DAR		/* Get the new esid			*/
+	clrrdi	r11,r11,28	/* Permits a full 32b of ESID		*/
+	ori	r11,r11,0x90	/* Turn on valid and kp			*/
+	std	r11,0(r10)	/* Put new entry back into the stab	*/
+
+	sync
+
+	/* All done -- return from exception. */
+	lwz	r9,PACA_EXSLB+EX_CCR(r13)	/* get saved CR */
+	ld	r11,PACA_EXSLB+EX_SRR0(r13)	/* get saved SRR0 */
+
+	andi.	r10,r12,MSR_RI
+	beq-	unrecov_slb
+
+	mtcrf	0x80,r9			/* restore CR */
+
+	mfmsr	r10
+	clrrdi	r10,r10,2
+	mtmsrd	r10,1
+
+	mtspr	SPRN_SRR0,r11
+	mtspr	SPRN_SRR1,r12
+	ld	r9,PACA_EXSLB+EX_R9(r13)
+	ld	r10,PACA_EXSLB+EX_R10(r13)
+	ld	r11,PACA_EXSLB+EX_R11(r13)
+	ld	r12,PACA_EXSLB+EX_R12(r13)
+	ld	r13,PACA_EXSLB+EX_R13(r13)
+	rfid
+	b	.	/* prevent speculative execution */
+
+#endif
 #ifdef CONFIG_PPC_PSERIES
 /*
  * Data area reserved for FWNMI option.
@@ -1005,3 +1145,19 @@ xLparMap:
 #ifdef CONFIG_PPC_PSERIES
         . = 0x8000
 #endif /* CONFIG_PPC_PSERIES */
+#ifndef CONFIG_BIGMEM
+
+/*
+ * Space for CPU0's segment table.
+ *
+ * On iSeries, the hypervisor must fill in at least one entry before
+ * we get control (with relocate on).  The address is given to the hv
+ * as a page number (see xLparMap above), so this must be at a
+ * fixed address (the linker can't compute (u64)&initial_stab >>
+ * PAGE_SHIFT).
+ */
+	. = STAB0_OFFSET	/* 0x8000 */
+	.globl initial_stab
+initial_stab:
+	.space	4096
+#endif
diff -urp a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -562,6 +562,11 @@ void __init setup_arch(char **cmdline_p)
 	exc_lvl_early_init();
 	emergency_stack_init();
 
+#ifndef CONFIG_BIGMEM
+#ifdef CONFIG_PPC_STD_MMU_64
+	stabs_alloc();
+#endif
+#endif
 	/* set up the bootmem stuff with available memory */
 	do_init_bootmem();
 	sparse_init();
diff -urp a/arch/powerpc/kvm/book3s_32_mmu_host.c b/arch/powerpc/kvm/book3s_32_mmu_host.c
--- a/arch/powerpc/kvm/book3s_32_mmu_host.c
+++ b/arch/powerpc/kvm/book3s_32_mmu_host.c
@@ -141,7 +141,11 @@ extern char etext[];
 int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte)
 {
 	pfn_t hpaddr;
+#ifndef CONFIG_BIGMEM
+	u64 va;
+#else
 	u64 vpn;
+#endif
 	u64 vsid;
 	struct kvmppc_sid_map *map;
 	volatile u32 *pteg;
@@ -171,7 +175,12 @@ int kvmppc_mmu_map_page(struct kvm_vcpu
 	BUG_ON(!map);
 
 	vsid = map->host_vsid;
+#ifndef CONFIG_BIGMEM
+	va = (vsid << SID_SHIFT) | (eaddr & ~ESID_MASK);
+
+#else
 	vpn = (vsid << (SID_SHIFT - VPN_SHIFT)) | ((eaddr & ~ESID_MASK) >> VPN_SHIFT)
+#endif
 next_pteg:
 	if (rr == 16) {
 		primary = !primary;
@@ -238,11 +247,19 @@ next_pteg:
 	dprintk_mmu("KVM: %c%c Map 0x%llx: [%lx] 0x%llx (0x%llx) -> %lx\n",
 		    orig_pte->may_write ? 'w' : '-',
 		    orig_pte->may_execute ? 'x' : '-',
+#ifndef CONFIG_BIGMEM
+		    orig_pte->eaddr, (ulong)pteg, va,
+#else
 		    orig_pte->eaddr, (ulong)pteg, vpn,
+#endif
 		    orig_pte->vpage, hpaddr);
 
 	pte->slot = (ulong)&pteg[rr];
+#ifndef CONFIG_BIGMEM
+	pte->host_va = va;
+#else
 	pte->host_vpn = vpn;
+#endif
 	pte->pte = *orig_pte;
 	pte->pfn = hpaddr >> PAGE_SHIFT;
 
diff -urp a/arch/powerpc/kvm/book3s_64_mmu_host.c b/arch/powerpc/kvm/book3s_64_mmu_host.c
--- a/arch/powerpc/kvm/book3s_64_mmu_host.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_host.c
@@ -33,7 +33,11 @@
 
 void kvmppc_mmu_invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
 {
+#ifndef CONFIG_BIGMEM
+	ppc_md.hpte_invalidate(pte->slot, pte->host_va,
+#else
 	ppc_md.hpte_invalidate(pte->slot, pte->host_vpn,
+#endif
 			       MMU_PAGE_4K, MMU_SEGSIZE_256M,
 			       false);
 }
@@ -80,9 +84,15 @@ static struct kvmppc_sid_map *find_sid_v
 
 int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte)
 {
+#ifdef CONFIG_BIGMEM
 	unsigned long vpn;
+#endif
 	pfn_t hpaddr;
+#ifndef CONFIG_BIGMEM
+	ulong hash, hpteg, va;
+#else
 	ulong hash, hpteg;
+#endif
 	u64 vsid;
 	int ret;
 	int rflags = 0x192;
@@ -115,7 +125,11 @@ int kvmppc_mmu_map_page(struct kvm_vcpu
 	}
 
 	vsid = map->host_vsid;
+#ifndef CONFIG_BIGMEM
+	va = hpt_va(orig_pte->eaddr, vsid, MMU_SEGSIZE_256M);
+#else
 	vpn = hpt_vpn(orig_pte->eaddr, vsid, MMU_SEGSIZE_256M);
+#endif
 
 	if (!orig_pte->may_write)
 		rflags |= HPTE_R_PP;
@@ -125,7 +139,11 @@ int kvmppc_mmu_map_page(struct kvm_vcpu
 	if (!orig_pte->may_execute)
 		rflags |= HPTE_R_N;
 
+#ifndef CONFIG_BIGMEM
+	hash = hpt_hash(va, PTE_SIZE, MMU_SEGSIZE_256M);
+#else
 	hash = hpt_hash(vpn, PTE_SIZE, MMU_SEGSIZE_256M);
+#endif
 
 map_again:
 	hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);
@@ -135,8 +153,12 @@ map_again:
 		if (ppc_md.hpte_remove(hpteg) < 0)
 			return -1;
 
+#ifndef CONFIG_BIGMEM
+	ret = ppc_md.hpte_insert(hpteg, va, hpaddr, rflags, vflags, MMU_PAGE_4K, MMU_SEGSIZE_256M);
+#else
 	ret = ppc_md.hpte_insert(hpteg, vpn, hpaddr, rflags, vflags,
 				 MMU_PAGE_4K, MMU_SEGSIZE_256M);
+#endif
 
 	if (ret < 0) {
 		/* If we couldn't map a primary PTE, try a secondary */
@@ -147,8 +169,12 @@ map_again:
 	} else {
 		struct hpte_cache *pte = kvmppc_mmu_hpte_cache_next(vcpu);
 
+#ifndef CONFIG_BIGMEM
+		trace_kvm_book3s_64_mmu_map(rflags, hpteg, va, hpaddr, orig_pte);
+#else
 		trace_kvm_book3s_64_mmu_map(rflags, hpteg,
 					    vpn, hpaddr, orig_pte);
+#endif
 
 		/* The ppc_md code may give us a secondary entry even though we
 		   asked for a primary. Fix up. */
@@ -158,7 +184,11 @@ map_again:
 		}
 
 		pte->slot = hpteg + (ret & 7);
+#ifndef CONFIG_BIGMEM
+		pte->host_va = va;
+#else
 		pte->host_vpn = vpn;
+#endif
 		pte->pte = *orig_pte;
 		pte->pfn = hpaddr >> PAGE_SHIFT;
 
@@ -302,8 +332,13 @@ int kvmppc_mmu_init(struct kvm_vcpu *vcp
 		return -1;
 	vcpu3s->context_id[0] = err;
 
+#ifndef CONFIG_BIGMEM
+	vcpu3s->vsid_max = ((vcpu3s->context_id[0] + 1) << USER_ESID_BITS) - 1;
+	vcpu3s->vsid_first = vcpu3s->context_id[0] << USER_ESID_BITS;
+#else
 	vcpu3s->vsid_max = ((vcpu3s->context_id[0] + 1) << ESID_BITS) - 1;
 	vcpu3s->vsid_first = vcpu3s->context_id[0] << ESID_BITS;
+#endif
 	vcpu3s->vsid_next = vcpu3s->vsid_first;
 
 	kvmppc_mmu_hpte_init(vcpu);
diff -urp a/arch/powerpc/kvm/trace.h b/arch/powerpc/kvm/trace.h
--- a/arch/powerpc/kvm/trace.h
+++ b/arch/powerpc/kvm/trace.h
@@ -186,7 +186,11 @@ TRACE_EVENT(kvm_book3s_mmu_map,
 	TP_ARGS(pte),
 
 	TP_STRUCT__entry(
+#ifndef CONFIG_BIGMEM
+		__field(	u64,		host_va		)
+#else
 		__field(	u64,		host_vpn	)
+#endif
 		__field(	u64,		pfn		)
 		__field(	ulong,		eaddr		)
 		__field(	u64,		vpage		)
@@ -195,7 +199,11 @@ TRACE_EVENT(kvm_book3s_mmu_map,
 	),
 
 	TP_fast_assign(
+#ifndef CONFIG_BIGMEM
+		__entry->host_va	= pte->host_va;
+#else
 		__entry->host_vpn	= pte->host_vpn;
+#endif
 		__entry->pfn		= pte->pfn;
 		__entry->eaddr		= pte->pte.eaddr;
 		__entry->vpage		= pte->pte.vpage;
@@ -205,8 +213,13 @@ TRACE_EVENT(kvm_book3s_mmu_map,
 					  (pte->pte.may_execute ? 0x1 : 0);
 	),
 
+#ifndef CONFIG_BIGMEM
+	TP_printk("Map: hva=%llx pfn=%llx ea=%lx vp=%llx ra=%lx [%x]",
+		  __entry->host_va, __entry->pfn, __entry->eaddr,
+#else
 	TP_printk("Map: hvpn=%llx pfn=%llx ea=%lx vp=%llx ra=%lx [%x]",
 		  __entry->host_vpn, __entry->pfn, __entry->eaddr,
+#endif
 		  __entry->vpage, __entry->raddr, __entry->flags)
 );
 
@@ -215,7 +228,11 @@ TRACE_EVENT(kvm_book3s_mmu_invalidate,
 	TP_ARGS(pte),
 
 	TP_STRUCT__entry(
+#ifndef CONFIG_BIGMEM
+		__field(	u64,		host_va		)
+#else
 		__field(	u64,		host_vpn	)
+#endif
 		__field(	u64,		pfn		)
 		__field(	ulong,		eaddr		)
 		__field(	u64,		vpage		)
@@ -224,7 +241,11 @@ TRACE_EVENT(kvm_book3s_mmu_invalidate,
 	),
 
 	TP_fast_assign(
+#ifndef CONFIG_BIGMEM
+		__entry->host_va	= pte->host_va;
+#else
 		__entry->host_vpn	= pte->host_vpn;
+#endif
 		__entry->pfn		= pte->pfn;
 		__entry->eaddr		= pte->pte.eaddr;
 		__entry->vpage		= pte->pte.vpage;
@@ -235,7 +256,11 @@ TRACE_EVENT(kvm_book3s_mmu_invalidate,
 	),
 
 	TP_printk("Flush: hva=%llx pfn=%llx ea=%lx vp=%llx ra=%lx [%x]",
+#ifndef CONFIG_BIGMEM
+		  __entry->host_va, __entry->pfn, __entry->eaddr,
+#else
 		  __entry->host_vpn, __entry->pfn, __entry->eaddr,
+#endif
 		  __entry->vpage, __entry->raddr, __entry->flags)
 );
 
diff -urp a/arch/powerpc/mm/hash_low_64.S b/arch/powerpc/mm/hash_low_64.S
--- a/arch/powerpc/mm/hash_low_64.S
+++ b/arch/powerpc/mm/hash_low_64.S
@@ -71,7 +71,11 @@ _GLOBAL(__hash_page_4K)
 	/* Save non-volatile registers.
 	 * r31 will hold "old PTE"
 	 * r30 is "new PTE"
+#ifndef CONFIG_BIGMEM
+	 * r29 is "va"
+#else
 	 * r29 is vpn
+#endif
 	 * r28 is a hash value
 	 * r27 is hashtab mask (maybe dynamic patched instead ?)
 	 */
@@ -119,6 +123,17 @@ BEGIN_FTR_SECTION
 	cmpdi	r9,0			/* check segment size */
 	bne	3f
 END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
+#ifndef CONFIG_BIGMEM
+	/* Calc va and put it in r29 */
+	rldicr	r29,r5,28,63-28
+	rldicl	r3,r3,0,36
+	or	r29,r3,r29
+
+	/* Calculate hash value for primary slot and store it in r28 */
+	rldicl	r5,r5,0,25		/* vsid & 0x0000007fffffffff */
+	rldicl	r0,r3,64-12,48		/* (ea >> 12) & 0xffff */
+	xor	r28,r5,r0
+#else
 	/* Calc vpn and put it in r29 */
 	sldi	r29,r5,SID_SHIFT - VPN_SHIFT
 	rldicl  r28,r3,64 - VPN_SHIFT,64 - (SID_SHIFT - VPN_SHIFT)
@@ -130,8 +145,19 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEG
 	 */
 	rldicl	r0,r3,64-12,48
 	xor	r28,r5,r0		/* hash */
+#endif
 	b	4f
 
+#ifndef CONFIG_BIGMEM
+3:	/* Calc VA and hash in r29 and r28 for 1T segment */
+	sldi	r29,r5,40		/* vsid << 40 */
+	clrldi	r3,r3,24		/* ea & 0xffffffffff */
+	rldic	r28,r5,25,25		/* (vsid << 25) & 0x7fffffffff */
+	clrldi	r5,r5,40		/* vsid & 0xffffff */
+	rldicl	r0,r3,64-12,36		/* (ea >> 12) & 0xfffffff */
+	xor	r28,r28,r5
+	or	r29,r3,r29		/* VA */
+#else
 3:	/* Calc vpn and put it in r29 */
 	sldi	r29,r5,SID_SHIFT_1T - VPN_SHIFT
 	rldicl  r28,r3,64 - VPN_SHIFT,64 - (SID_SHIFT_1T - VPN_SHIFT)
@@ -146,6 +172,7 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEG
 	/* r0 =  (va >> 12) & ((1ul << (40 - 12)) -1) */
 	rldicl	r0,r3,64-12,36
 	xor	r28,r28,r5		/* vsid ^ ( vsid << 25) */
+#endif
 	xor	r28,r28,r0		/* hash */
 
 	/* Convert linux PTE bits into HW equivalents */
@@ -201,7 +228,11 @@ htab_insert_pte:
 
 	/* Call ppc_md.hpte_insert */
 	ld	r6,STK_PARM(r4)(r1)	/* Retrieve new pp bits */
+#ifndef CONFIG_BIGMEM
+	mr	r4,r29			/* Retrieve va */
+#else
 	mr	r4,r29			/* Retrieve vpn */
+#endif
 	li	r7,0			/* !bolted, !secondary */
 	li	r8,MMU_PAGE_4K		/* page size */
 	ld	r9,STK_PARM(r9)(r1)	/* segment size */
@@ -224,7 +255,11 @@ _GLOBAL(htab_call_hpte_insert1)
 	
 	/* Call ppc_md.hpte_insert */
 	ld	r6,STK_PARM(r4)(r1)	/* Retrieve new pp bits */
+#ifndef CONFIG_BIGMEM
+	mr	r4,r29			/* Retrieve va */
+#else
 	mr	r4,r29			/* Retrieve vpn */
+#endif
 	li	r7,HPTE_V_SECONDARY	/* !bolted, secondary */
 	li	r8,MMU_PAGE_4K		/* page size */
 	ld	r9,STK_PARM(r9)(r1)	/* segment size */
@@ -294,7 +329,11 @@ htab_modify_pte:
 	add	r3,r0,r3	/* add slot idx */
 
 	/* Call ppc_md.hpte_updatepp */
+#ifndef CONFIG_BIGMEM
+	mr	r5,r29			/* va */
+#else
 	mr	r5,r29			/* vpn */
+#endif
 	li	r6,MMU_PAGE_4K		/* page size */
 	ld	r7,STK_PARM(r9)(r1)	/* segment size */
 	ld	r8,STK_PARM(r8)(r1)	/* get "local" param */
@@ -355,7 +394,11 @@ _GLOBAL(__hash_page_4K)
 	/* Save non-volatile registers.
 	 * r31 will hold "old PTE"
 	 * r30 is "new PTE"
+#ifndef CONFIG_BIGMEM
+	 * r29 is "va"
+#else
 	 * r29 is vpn
+#endif
 	 * r28 is a hash value
 	 * r27 is hashtab mask (maybe dynamic patched instead ?)
 	 * r26 is the hidx mask
@@ -410,6 +453,17 @@ BEGIN_FTR_SECTION
 	cmpdi	r9,0			/* check segment size */
 	bne	3f
 END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
+#ifndef CONFIG_BIGMEM
+	/* Calc va and put it in r29 */
+	rldicr	r29,r5,28,63-28		/* r29 = (vsid << 28) */
+	rldicl	r3,r3,0,36		/* r3 = (ea & 0x0fffffff) */
+	or	r29,r3,r29		/* r29 = va */
+
+	/* Calculate hash value for primary slot and store it in r28 */
+	rldicl	r5,r5,0,25		/* vsid & 0x0000007fffffffff */
+	rldicl	r0,r3,64-12,48		/* (ea >> 12) & 0xffff */
+	xor	r28,r5,r0
+#else
 	/* Calc vpn and put it in r29 */
 	sldi	r29,r5,SID_SHIFT - VPN_SHIFT
 	/*
@@ -425,8 +479,19 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEG
 	 */
 	rldicl	r0,r3,64-12,48
 	xor	r28,r5,r0		/* hash */
+#endif
 	b	4f
 
+#ifndef CONFIG_BIGMEM
+3:	/* Calc VA and hash in r29 and r28 for 1T segment */
+	sldi	r29,r5,40		/* vsid << 40 */
+	clrldi	r3,r3,24		/* ea & 0xffffffffff */
+	rldic	r28,r5,25,25		/* (vsid << 25) & 0x7fffffffff */
+	clrldi	r5,r5,40		/* vsid & 0xffffff */
+	rldicl	r0,r3,64-12,36		/* (ea >> 12) & 0xfffffff */
+	xor	r28,r28,r5
+	or	r29,r3,r29		/* VA */
+#else
 3:	/* Calc vpn and put it in r29 */
 	sldi	r29,r5,SID_SHIFT_1T - VPN_SHIFT
 	/*
@@ -445,6 +510,7 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEG
 	/* r0 = (va >> 12) & ((1ul << (40 - 12)) -1) */
 	rldicl	r0,r3,64-12,36
 	xor	r28,r28,r5		/* vsid ^ ( vsid << 25) */
+#endif
 	xor	r28,r28,r0		/* hash */
 
 	/* Convert linux PTE bits into HW equivalents */
@@ -520,7 +586,11 @@ htab_special_pfn:
 
 	/* Call ppc_md.hpte_insert */
 	ld	r6,STK_PARM(r4)(r1)	/* Retrieve new pp bits */
+#ifndef CONFIG_BIGMEM
+	mr	r4,r29			/* Retrieve va */
+#else
 	mr	r4,r29			/* Retrieve vpn */
+#endif
 	li	r7,0			/* !bolted, !secondary */
 	li	r8,MMU_PAGE_4K		/* page size */
 	ld	r9,STK_PARM(r9)(r1)	/* segment size */
@@ -547,7 +617,11 @@ _GLOBAL(htab_call_hpte_insert1)
 
 	/* Call ppc_md.hpte_insert */
 	ld	r6,STK_PARM(r4)(r1)	/* Retrieve new pp bits */
+#ifndef CONFIG_BIGMEM
+	mr	r4,r29			/* Retrieve va */
+#else
 	mr	r4,r29			/* Retrieve vpn */
+#endif
 	li	r7,HPTE_V_SECONDARY	/* !bolted, secondary */
 	li	r8,MMU_PAGE_4K		/* page size */
 	ld	r9,STK_PARM(r9)(r1)	/* segment size */
@@ -579,7 +653,11 @@ _GLOBAL(htab_call_hpte_remove)
 	 * useless now that the segment has been switched to 4k pages.
 	 */
 htab_inval_old_hpte:
+#ifndef CONFIG_BIGMEM
+	mr	r3,r29			/* virtual addr */
+#else
 	mr	r3,r29			/* vpn */
+#endif
 	mr	r4,r31			/* PTE.pte */
 	li	r5,0			/* PTE.hidx */
 	li	r6,MMU_PAGE_64K		/* psize */
@@ -652,7 +730,11 @@ htab_modify_pte:
 	add	r3,r0,r3	/* add slot idx */
 
 	/* Call ppc_md.hpte_updatepp */
+#ifndef CONFIG_BIGMEM
+	mr	r5,r29			/* va */
+#else
 	mr	r5,r29			/* vpn */
+#endif
 	li	r6,MMU_PAGE_4K		/* page size */
 	ld	r7,STK_PARM(r9)(r1)	/* segment size */
 	ld	r8,STK_PARM(r8)(r1)	/* get "local" param */
@@ -708,7 +790,11 @@ _GLOBAL(__hash_page_64K)
 	/* Save non-volatile registers.
 	 * r31 will hold "old PTE"
 	 * r30 is "new PTE"
+#ifndef CONFIG_BIGMEM
+	 * r29 is "va"
+#else
 	 * r29 is vpn
+#endif
 	 * r28 is a hash value
 	 * r27 is hashtab mask (maybe dynamic patched instead ?)
 	 */
@@ -761,19 +847,43 @@ BEGIN_FTR_SECTION
 	cmpdi	r9,0			/* check segment size */
 	bne	3f
 END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
+#ifndef CONFIG_BIGMEM
+	/* Calc va and put it in r29 */
+	rldicr	r29,r5,28,63-28
+	rldicl	r3,r3,0,36
+	or	r29,r3,r29
+#else
 	/* Calc vpn and put it in r29 */
 	sldi	r29,r5,SID_SHIFT - VPN_SHIFT
 	rldicl  r28,r3,64 - VPN_SHIFT,64 - (SID_SHIFT - VPN_SHIFT)
 	or	r29,r28,r29
+#endif
 
+#ifndef CONFIG_BIGMEM
+	/* Calculate hash value for primary slot and store it in r28 */
+	rldicl	r5,r5,0,25		/* vsid & 0x0000007fffffffff */
+	rldicl	r0,r3,64-16,52		/* (ea >> 16) & 0xfff */
+	xor	r28,r5,r0
+#else
 	/* Calculate hash value for primary slot and store it in r28
 	 * r3 = va, r5 = vsid
 	 * r0 = (va >> 16) & ((1ul << (28 - 16)) -1)
 	 */
 	rldicl	r0,r3,64-16,52
 	xor	r28,r5,r0		/* hash */
+#endif
 	b	4f
 
+#ifndef CONFIG_BIGMEM
+3:	/* Calc VA and hash in r29 and r28 for 1T segment */
+	sldi	r29,r5,40		/* vsid << 40 */
+	clrldi	r3,r3,24		/* ea & 0xffffffffff */
+	rldic	r28,r5,25,25		/* (vsid << 25) & 0x7fffffffff */
+	clrldi	r5,r5,40		/* vsid & 0xffffff */
+	rldicl	r0,r3,64-16,40		/* (ea >> 16) & 0xffffff */
+	xor	r28,r28,r5
+	or	r29,r3,r29		/* VA */
+#else
 3:	/* Calc vpn and put it in r29 */
 	sldi	r29,r5,SID_SHIFT_1T - VPN_SHIFT
 	rldicl  r28,r3,64 - VPN_SHIFT,64 - (SID_SHIFT_1T - VPN_SHIFT)
@@ -787,6 +897,7 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEG
 	/* r0 = (va >> 16) & ((1ul << (40 - 16)) -1) */
 	rldicl	r0,r3,64-16,40
 	xor	r28,r28,r5		/* vsid ^ ( vsid << 25) */
+#endif
 	xor	r28,r28,r0		/* hash */
 
 	/* Convert linux PTE bits into HW equivalents */
@@ -845,7 +956,11 @@ ht64_insert_pte:
 
 	/* Call ppc_md.hpte_insert */
 	ld	r6,STK_PARM(r4)(r1)	/* Retrieve new pp bits */
+#ifndef CONFIG_BIGMEM
+	mr	r4,r29			/* Retrieve va */
+#else
 	mr	r4,r29			/* Retrieve vpn */
+#endif
 	li	r7,0			/* !bolted, !secondary */
 	li	r8,MMU_PAGE_64K
 	ld	r9,STK_PARM(r9)(r1)	/* segment size */
@@ -868,7 +983,11 @@ _GLOBAL(ht64_call_hpte_insert1)
 
 	/* Call ppc_md.hpte_insert */
 	ld	r6,STK_PARM(r4)(r1)	/* Retrieve new pp bits */
+#ifndef CONFIG_BIGMEM
+	mr	r4,r29			/* Retrieve va */
+#else
 	mr	r4,r29			/* Retrieve vpn */
+#endif
 	li	r7,HPTE_V_SECONDARY	/* !bolted, secondary */
 	li	r8,MMU_PAGE_64K
 	ld	r9,STK_PARM(r9)(r1)	/* segment size */
@@ -938,7 +1057,11 @@ ht64_modify_pte:
 	add	r3,r0,r3	/* add slot idx */
 
 	/* Call ppc_md.hpte_updatepp */
+#ifndef CONFIG_BIGMEM
+	mr	r5,r29			/* va */
+#else
 	mr	r5,r29			/* vpn */
+#endif
 	li	r6,MMU_PAGE_64K
 	ld	r7,STK_PARM(r9)(r1)	/* segment size */
 	ld	r8,STK_PARM(r8)(r1)	/* get "local" param */
diff -urp a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c
--- a/arch/powerpc/mm/hash_native_64.c
+++ b/arch/powerpc/mm/hash_native_64.c
@@ -39,11 +39,20 @@
 
 static DEFINE_RAW_SPINLOCK(native_tlbie_lock);
 
+#ifndef CONFIG_BIGMEM
+static inline void __tlbie(unsigned long va, int psize, int ssize)
+#else
 static inline void __tlbie(unsigned long vpn, int psize, int ssize)
+#endif
 {
+#ifdef CONFIG_BIGMEM
 	unsigned long va;
+#endif
 	unsigned int penc;
 
+#ifndef CONFIG_BIGMEM
+	/* clear top 16 bits, non SLS segment */
+#else
 	/*
 	 * We need 14 to 65 bits of va for a tlibe of 4K page
 	 * With vpn we ignore the lower VPN_SHIFT bits already.
@@ -57,17 +66,23 @@ static inline void __tlbie(unsigned long
 	 * Older versions of the architecture (2.02 and earler) require the
 	 * masking of the top 16 bits.
 	 */
+#endif
 	va &= ~(0xffffULL << 48);
 
 	switch (psize) {
 	case MMU_PAGE_4K:
+#ifndef CONFIG_BIGMEM
+		va &= ~0xffful;
+#endif
 		va |= ssize << 8;
 		asm volatile(ASM_FTR_IFCLR("tlbie %0,0", PPC_TLBIE(%1,%0), %2)
 			     : : "r" (va), "r"(0), "i" (CPU_FTR_HVMODE_206)
 			     : "memory");
 		break;
 	default:
+#ifdef CONFIG_BIGMEM
 		/* We need 14 to 14 + i bits of va */
+#endif
 		penc = mmu_psize_defs[psize].penc;
 		va &= ~((1ul << mmu_psize_defs[psize].shift) - 1);
 		va |= penc << 12;
@@ -80,11 +95,20 @@ static inline void __tlbie(unsigned long
 	}
 }
 
+#ifndef CONFIG_BIGMEM
+static inline void __tlbiel(unsigned long va, int psize, int ssize)
+#else
 static inline void __tlbiel(unsigned long vpn, int psize, int ssize)
+#endif
 {
+#ifdef CONFIG_BIGMEM
 	unsigned long va;
+#endif
 	unsigned int penc;
 
+#ifndef CONFIG_BIGMEM
+	/* clear top 16 bits, non SLS segment */
+#else
 	/* VPN_SHIFT can be atmost 12 */
 	va = vpn << VPN_SHIFT;
 	/*
@@ -92,16 +116,22 @@ static inline void __tlbiel(unsigned lon
 	 * Older versions of the architecture (2.02 and earler) require the
 	 * masking of the top 16 bits.
 	 */
+#endif
 	va &= ~(0xffffULL << 48);
 
 	switch (psize) {
 	case MMU_PAGE_4K:
+#ifndef CONFIG_BIGMEM
+		va &= ~0xffful;
+#endif
 		va |= ssize << 8;
 		asm volatile(".long 0x7c000224 | (%0 << 11) | (0 << 21)"
 			     : : "r"(va) : "memory");
 		break;
 	default:
+#ifdef CONFIG_BIGMEM
 		/* We need 14 to 14 + i bits of va */
+#endif
 		penc = mmu_psize_defs[psize].penc;
 		va &= ~((1ul << mmu_psize_defs[psize].shift) - 1);
 		va |= penc << 12;
@@ -114,7 +144,11 @@ static inline void __tlbiel(unsigned lon
 
 }
 
+#ifndef CONFIG_BIGMEM
+static inline void tlbie(unsigned long va, int psize, int ssize, int local)
+#else
 static inline void tlbie(unsigned long vpn, int psize, int ssize, int local)
+#endif
 {
 	unsigned int use_local = local && mmu_has_feature(MMU_FTR_TLBIEL);
 	int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
@@ -125,10 +159,18 @@ static inline void tlbie(unsigned long v
 		raw_spin_lock(&native_tlbie_lock);
 	asm volatile("ptesync": : :"memory");
 	if (use_local) {
+#ifndef CONFIG_BIGMEM
+		__tlbiel(va, psize, ssize);
+#else
 		__tlbiel(vpn, psize, ssize);
+#endif
 		asm volatile("ptesync": : :"memory");
 	} else {
+#ifndef CONFIG_BIGMEM
+		__tlbie(va, psize, ssize);
+#else
 		__tlbie(vpn, psize, ssize);
+#endif
 		asm volatile("eieio; tlbsync; ptesync": : :"memory");
 	}
 	if (lock_tlbie && !use_local)
@@ -154,7 +196,11 @@ static inline void native_unlock_hpte(st
 	clear_bit_unlock(HPTE_LOCK_BIT, word);
 }
 
+#ifndef CONFIG_BIGMEM
+static long native_hpte_insert(unsigned long hpte_group, unsigned long va,
+#else
 static long native_hpte_insert(unsigned long hpte_group, unsigned long vpn,
+#endif
 			unsigned long pa, unsigned long rflags,
 			unsigned long vflags, int psize, int ssize)
 {
@@ -163,9 +209,17 @@ static long native_hpte_insert(unsigned
 	int i;
 
 	if (!(vflags & HPTE_V_BOLTED)) {
+#ifndef CONFIG_BIGMEM
+		DBG_LOW("    insert(group=%lx, va=%016lx, pa=%016lx,"
+#else
 		DBG_LOW("    insert(group=%lx, vpn=%016lx, pa=%016lx,"
+#endif
 			" rflags=%lx, vflags=%lx, psize=%d)\n",
+#ifndef CONFIG_BIGMEM
+			hpte_group, va, pa, rflags, vflags, psize);
+#else
 			hpte_group, vpn, pa, rflags, vflags, psize);
+#endif
 	}
 
 	for (i = 0; i < HPTES_PER_GROUP; i++) {
@@ -183,7 +237,11 @@ static long native_hpte_insert(unsigned
 	if (i == HPTES_PER_GROUP)
 		return -1;
 
+#ifndef CONFIG_BIGMEM
+	hpte_v = hpte_encode_v(va, psize, ssize) | vflags | HPTE_V_VALID;
+#else
 	hpte_v = hpte_encode_v(vpn, psize, ssize) | vflags | HPTE_V_VALID;
+#endif
 	hpte_r = hpte_encode_r(pa, psize) | rflags;
 
 	if (!(vflags & HPTE_V_BOLTED)) {
@@ -245,17 +303,30 @@ static long native_hpte_remove(unsigned
 }
 
 static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
+#ifndef CONFIG_BIGMEM
+				 unsigned long va, int psize, int ssize,
+#else
 				 unsigned long vpn, int psize, int ssize,
+#endif
 				 int local)
 {
 	struct hash_pte *hptep = htab_address + slot;
 	unsigned long hpte_v, want_v;
 	int ret = 0;
 
+#ifndef CONFIG_BIGMEM
+	want_v = hpte_encode_v(va, psize, ssize);
+#else
 	want_v = hpte_encode_v(vpn, psize, ssize);
+#endif
 
+#ifndef CONFIG_BIGMEM
+	DBG_LOW("    update(va=%016lx, avpnv=%016lx, hash=%016lx, newpp=%lx)",
+		va, want_v & HPTE_V_AVPN, slot, newpp);
+#else
 	DBG_LOW("    update(vpn=%016lx, avpnv=%016lx, group=%lx, newpp=%lx)",
 		vpn, want_v & HPTE_V_AVPN, slot, newpp);
+#endif
 
 	native_lock_hpte(hptep);
 
@@ -274,12 +345,20 @@ static long native_hpte_updatepp(unsigne
 	native_unlock_hpte(hptep);
 
 	/* Ensure it is out of the tlb too. */
+#ifndef CONFIG_BIGMEM
+	tlbie(va, psize, ssize, local);
+#else
 	tlbie(vpn, psize, ssize, local);
+#endif
 
 	return ret;
 }
 
+#ifndef CONFIG_BIGMEM
+static long native_hpte_find(unsigned long va, int psize, int ssize)
+#else
 static long native_hpte_find(unsigned long vpn, int psize, int ssize)
+#endif
 {
 	struct hash_pte *hptep;
 	unsigned long hash;
@@ -287,8 +366,13 @@ static long native_hpte_find(unsigned lo
 	long slot;
 	unsigned long want_v, hpte_v;
 
+#ifndef CONFIG_BIGMEM
+	hash = hpt_hash(va, mmu_psize_defs[psize].shift, ssize);
+	want_v = hpte_encode_v(va, psize, ssize);
+#else
 	hash = hpt_hash(vpn, mmu_psize_defs[psize].shift, ssize);
 	want_v = hpte_encode_v(vpn, psize, ssize);
+#endif
 
 	/* Bolted mappings are only ever in the primary group */
 	slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
@@ -315,15 +399,27 @@ static long native_hpte_find(unsigned lo
 static void native_hpte_updateboltedpp(unsigned long newpp, unsigned long ea,
 				       int psize, int ssize)
 {
+#ifndef CONFIG_BIGMEM
+	unsigned long vsid, va;
+#else
 	unsigned long vpn;
 	unsigned long vsid;
+#endif
 	long slot;
 	struct hash_pte *hptep;
 
 	vsid = get_kernel_vsid(ea, ssize);
+#ifndef CONFIG_BIGMEM
+	va = hpt_va(ea, vsid, ssize);
+#else
 	vpn = hpt_vpn(ea, vsid, ssize);
+#endif
 
+#ifndef CONFIG_BIGMEM
+	slot = native_hpte_find(va, psize, ssize);
+#else
 	slot = native_hpte_find(vpn, psize, ssize);
+#endif
 	if (slot == -1)
 		panic("could not find page to bolt\n");
 	hptep = htab_address + slot;
@@ -333,10 +429,18 @@ static void native_hpte_updateboltedpp(u
 		(newpp & (HPTE_R_PP | HPTE_R_N));
 
 	/* Ensure it is out of the tlb too. */
+#ifndef CONFIG_BIGMEM
+	tlbie(va, psize, ssize, 0);
+#else
 	tlbie(vpn, psize, ssize, 0);
+#endif
 }
 
+#ifndef CONFIG_BIGMEM
+static void native_hpte_invalidate(unsigned long slot, unsigned long va,
+#else
 static void native_hpte_invalidate(unsigned long slot, unsigned long vpn,
+#endif
 				   int psize, int ssize, int local)
 {
 	struct hash_pte *hptep = htab_address + slot;
@@ -346,9 +450,17 @@ static void native_hpte_invalidate(unsig
 
 	local_irq_save(flags);
 
+#ifndef CONFIG_BIGMEM
+	DBG_LOW("    invalidate(va=%016lx, hash: %x)\n", va, slot);
+#else
 	DBG_LOW("    invalidate(vpn=%016lx, hash: %lx)\n", vpn, slot);
+#endif
 
+#ifndef CONFIG_BIGMEM
+	want_v = hpte_encode_v(va, psize, ssize);
+#else
 	want_v = hpte_encode_v(vpn, psize, ssize);
+#endif
 	native_lock_hpte(hptep);
 	hpte_v = hptep->v;
 
@@ -360,7 +472,11 @@ static void native_hpte_invalidate(unsig
 		hptep->v = 0;
 
 	/* Invalidate the TLB */
+#ifndef CONFIG_BIGMEM
+	tlbie(va, psize, ssize, local);
+#else
 	tlbie(vpn, psize, ssize, local);
+#endif
 
 	local_irq_restore(flags);
 }
@@ -370,12 +486,22 @@ static void native_hpte_invalidate(unsig
 #define LP_MASK(i)	((0xFF >> (i)) << LP_SHIFT)
 
 static void hpte_decode(struct hash_pte *hpte, unsigned long slot,
+#ifndef CONFIG_BIGMEM
+			int *psize, int *ssize, unsigned long *va)
+#else
 			int *psize, int *ssize, unsigned long *vpn)
+#endif
 {
+#ifdef CONFIG_BIGMEM
 	unsigned long avpn, pteg, vpi;
+#endif
 	unsigned long hpte_r = hpte->r;
 	unsigned long hpte_v = hpte->v;
+#ifndef CONFIG_BIGMEM
+	unsigned long avpn;
+#else
 	unsigned long vsid, seg_off;
+#endif
 	int i, size, shift, penc;
 
 	if (!(hpte_v & HPTE_V_LARGE))
@@ -402,14 +528,35 @@ static void hpte_decode(struct hash_pte
 	}
 
 	/* This works for all page sizes, and for 256M and 1T segments */
+#ifdef CONFIG_BIGMEM
 	*ssize = hpte_v >> HPTE_V_SSIZE_SHIFT;
+#endif
 	shift = mmu_psize_defs[size].shift;
-
+#ifndef CONFIG_BIGMEM
+	avpn = (HPTE_V_AVPN_VAL(hpte_v) & ~mmu_psize_defs[size].avpnm) << 23;
+#endif
+
+#ifndef CONFIG_BIGMEM
+	if (shift < 23) {
+		unsigned long vpi, vsid, pteg;
+#else
 	avpn = (HPTE_V_AVPN_VAL(hpte_v) & ~mmu_psize_defs[size].avpnm);
 	pteg = slot / HPTES_PER_GROUP;
 	if (hpte_v & HPTE_V_SECONDARY)
 		pteg = ~pteg;
+#endif
 
+#ifndef CONFIG_BIGMEM
+		pteg = slot / HPTES_PER_GROUP;
+		if (hpte_v & HPTE_V_SECONDARY)
+			pteg = ~pteg;
+		switch (hpte_v >> HPTE_V_SSIZE_SHIFT) {
+		case MMU_SEGSIZE_256M:
+			vpi = ((avpn >> 28) ^ pteg) & htab_hash_mask;
+			break;
+		case MMU_SEGSIZE_1T:
+			vsid = avpn >> 40;
+#else
 	switch (*ssize) {
 	case MMU_SEGSIZE_256M:
 		/* We only have 28 - 23 bits of seg_off in avpn */
@@ -426,14 +573,32 @@ static void hpte_decode(struct hash_pte
 		seg_off = (avpn & 0x1ffff) << 23;
 		vsid    = avpn >> 17;
 		if (shift < 23) {
+#endif
 			vpi = (vsid ^ (vsid << 25) ^ pteg) & htab_hash_mask;
+#ifndef CONFIG_BIGMEM
+			break;
+		default:
+			avpn = vpi = size = 0;
+#else
 			seg_off |= vpi << shift;
+#endif
 		}
+#ifndef CONFIG_BIGMEM
+		avpn |= (vpi << mmu_psize_defs[size].shift);
+#else
 		*vpn = vsid << (SID_SHIFT_1T - VPN_SHIFT) | seg_off >> VPN_SHIFT;
 	default:
 		*vpn = size = 0;
+#endif
 	}
+#ifndef CONFIG_BIGMEM
+
+	*va = avpn;
+#endif
 	*psize = size;
+#ifndef CONFIG_BIGMEM
+	*ssize = hpte_v >> HPTE_V_SSIZE_SHIFT;
+#endif
 }
 
 /*
@@ -446,10 +611,16 @@ static void hpte_decode(struct hash_pte
  */
 static void native_hpte_clear(void)
 {
+#ifdef CONFIG_BIGMEM
 	unsigned long vpn = 0;
+#endif
 	unsigned long slot, slots, flags;
 	struct hash_pte *hptep = htab_address;
+#ifndef CONFIG_BIGMEM
+	unsigned long hpte_v, va;
+#else
 	unsigned long hpte_v;
+#endif
 	unsigned long pteg_count;
 	int psize, ssize;
 
@@ -477,9 +648,17 @@ static void native_hpte_clear(void)
 		 * already hold the native_tlbie_lock.
 		 */
 		if (hpte_v & HPTE_V_VALID) {
+#ifndef CONFIG_BIGMEM
+			hpte_decode(hptep, slot, &psize, &ssize, &va);
+#else
 			hpte_decode(hptep, slot, &psize, &ssize, &vpn);
+#endif
 			hptep->v = 0;
+#ifndef CONFIG_BIGMEM
+			__tlbie(va, psize, ssize);
+#else
 			__tlbie(vpn, psize, ssize);
+#endif
 		}
 	}
 
@@ -494,8 +673,12 @@ static void native_hpte_clear(void)
  */
 static void native_flush_hash_range(unsigned long number, int local)
 {
+#ifndef CONFIG_BIGMEM
+	unsigned long va, hash, index, hidx, shift, slot;
+#else
 	unsigned long vpn;
 	unsigned long hash, index, hidx, shift, slot;
+#endif
 	struct hash_pte *hptep;
 	unsigned long hpte_v;
 	unsigned long want_v;
@@ -509,18 +692,31 @@ static void native_flush_hash_range(unsi
 	local_irq_save(flags);
 
 	for (i = 0; i < number; i++) {
+#ifndef CONFIG_BIGMEM
+		va = batch->vaddr[i];
+#else
 		vpn = batch->vpn[i];
+#endif
 		pte = batch->pte[i];
 
+#ifndef CONFIG_BIGMEM
+		pte_iterate_hashed_subpages(pte, psize, va, index, shift) {
+			hash = hpt_hash(va, shift, ssize);
+#else
 		pte_iterate_hashed_subpages(pte, psize, vpn, index, shift) {
 			hash = hpt_hash(vpn, shift, ssize);
+#endif
 			hidx = __rpte_to_hidx(pte, index);
 			if (hidx & _PTEIDX_SECONDARY)
 				hash = ~hash;
 			slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
 			slot += hidx & _PTEIDX_GROUP_IX;
 			hptep = htab_address + slot;
+#ifndef CONFIG_BIGMEM
+			want_v = hpte_encode_v(va, psize, ssize);
+#else
 			want_v = hpte_encode_v(vpn, psize, ssize);
+#endif
 			native_lock_hpte(hptep);
 			hpte_v = hptep->v;
 			if (!HPTE_V_COMPARE(hpte_v, want_v) ||
@@ -535,12 +731,22 @@ static void native_flush_hash_range(unsi
 	    mmu_psize_defs[psize].tlbiel && local) {
 		asm volatile("ptesync":::"memory");
 		for (i = 0; i < number; i++) {
+#ifndef CONFIG_BIGMEM
+			va = batch->vaddr[i];
+#else
 			vpn = batch->vpn[i];
+#endif
 			pte = batch->pte[i];
 
+#ifndef CONFIG_BIGMEM
+			pte_iterate_hashed_subpages(pte, psize, va, index,
+						    shift) {
+				__tlbiel(va, psize, ssize);
+#else
 			pte_iterate_hashed_subpages(pte, psize,
 						    vpn, index, shift) {
 				__tlbiel(vpn, psize, ssize);
+#endif
 			} pte_iterate_hashed_end();
 		}
 		asm volatile("ptesync":::"memory");
@@ -552,12 +758,22 @@ static void native_flush_hash_range(unsi
 
 		asm volatile("ptesync":::"memory");
 		for (i = 0; i < number; i++) {
+#ifndef CONFIG_BIGMEM
+			va = batch->vaddr[i];
+#else
 			vpn = batch->vpn[i];
+#endif
 			pte = batch->pte[i];
 
+#ifndef CONFIG_BIGMEM
+			pte_iterate_hashed_subpages(pte, psize, va, index,
+						    shift) {
+				__tlbie(va, psize, ssize);
+#else
 			pte_iterate_hashed_subpages(pte, psize,
 						    vpn, index, shift) {
 				__tlbie(vpn, psize, ssize);
+#endif
 			} pte_iterate_hashed_end();
 		}
 		asm volatile("eieio; tlbsync; ptesync":::"memory");
diff -urp a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -194,23 +194,37 @@ int htab_bolt_mapping(unsigned long vsta
 	     vaddr += step, paddr += step) {
 		unsigned long hash, hpteg;
 		unsigned long vsid = get_kernel_vsid(vaddr, ssize);
+#ifndef CONFIG_BIGMEM
+		unsigned long va = hpt_va(vaddr, vsid, ssize);
+#else
 		unsigned long vpn  = hpt_vpn(vaddr, vsid, ssize);
+#endif
 		unsigned long tprot = prot;
 
+#ifdef CONFIG_BIGMEM
 		/*
 		 * If we hit a bad address return error.
 		 */
 		if (!vsid)
 			return -1;
+#endif
 		/* Make kernel text executable */
 		if (overlaps_kernel_text(vaddr, vaddr + step))
 			tprot &= ~HPTE_R_N;
 
+#ifndef CONFIG_BIGMEM
+		hash = hpt_hash(va, shift, ssize);
+#else
 		hash = hpt_hash(vpn, shift, ssize);
+#endif
 		hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);
 
 		BUG_ON(!ppc_md.hpte_insert);
+#ifndef CONFIG_BIGMEM
+		ret = ppc_md.hpte_insert(hpteg, va, paddr, tprot,
+#else
 		ret = ppc_md.hpte_insert(hpteg, vpn, paddr, tprot,
+#endif
 					 HPTE_V_BOLTED, psize, ssize);
 
 		if (ret < 0)
@@ -753,15 +767,35 @@ static void __init htab_initialize(void)
 
 void __init early_init_mmu(void)
 {
+#ifndef CONFIG_BIGMEM
+	/* Setup initial STAB address in the PACA */
+	get_paca()->stab_real = __pa((u64)&initial_stab);
+	get_paca()->stab_addr = (u64)&initial_stab;
+
+#endif
 	/* Initialize the MMU Hash table and create the linear mapping
+#ifndef CONFIG_BIGMEM
+	 * of memory. Has to be done before stab/slb initialization as
+	 * this is currently where the page size encoding is obtained
+#else
 	 * of memory. Has to be done before SLB initialization as this is
 	 * currently where the page size encoding is obtained.
+#endif
 	 */
 	htab_initialize();
 
+#ifndef CONFIG_BIGMEM
+	/* Initialize stab / SLB management except on iSeries
+	 */
+#else
 	/* Initialize SLB management */
+#endif
 	if (mmu_has_feature(MMU_FTR_SLB))
 		slb_initialize();
+#ifndef CONFIG_BIGMEM
+	else if (!firmware_has_feature(FW_FEATURE_ISERIES))
+		stab_initialize(get_paca()->stab_real);
+#endif
 }
 
 #ifdef CONFIG_SMP
@@ -771,9 +805,20 @@ void __cpuinit early_init_mmu_secondary(
 	if (!firmware_has_feature(FW_FEATURE_LPAR))
 		mtspr(SPRN_SDR1, _SDR1);
 
+#ifndef CONFIG_BIGMEM
+	/* Initialize STAB/SLB. We use a virtual address as it works
+	 * in real mode on pSeries and we want a virtual address on
+	 * iSeries anyway
+	 */
+#else
 	/* Initialize SLB */
+#endif
 	if (mmu_has_feature(MMU_FTR_SLB))
 		slb_initialize();
+#ifndef CONFIG_BIGMEM
+	else
+		stab_initialize(get_paca()->stab_addr);
+#endif
 }
 #endif /* CONFIG_SMP */
 
@@ -803,19 +848,37 @@ unsigned int hash_page_do_lazy_icache(un
 #ifdef CONFIG_PPC_MM_SLICES
 unsigned int get_paca_psize(unsigned long addr)
 {
+#ifndef CONFIG_BIGMEM
+	unsigned long index, slices;
+#else
 	u64 lpsizes;
 	unsigned char *hpsizes;
 	unsigned long index, mask_index;
+#endif
 
 	if (addr < SLICE_LOW_TOP) {
+#ifndef CONFIG_BIGMEM
+		slices = get_paca()->context.low_slices_psize;
+#else
 		lpsizes = get_paca()->context.low_slices_psize;
+#endif
 		index = GET_LOW_SLICE_INDEX(addr);
+#ifndef CONFIG_BIGMEM
+	} else {
+		slices = get_paca()->context.high_slices_psize;
+		index = GET_HIGH_SLICE_INDEX(addr);
+#else
 		return (lpsizes >> (index * 4)) & 0xF;
+#endif
 	}
+#ifndef CONFIG_BIGMEM
+	return (slices >> (index * 4)) & 0xF;
+#else
 	hpsizes = get_paca()->context.high_slices_psize;
 	index = GET_HIGH_SLICE_INDEX(addr);
 	mask_index = index & 0x1;
 	return (hpsizes[index >> 1] >> (mask_index * 4)) & 0xF;
+#endif
 }
 
 #else
@@ -921,6 +984,13 @@ int hash_page(unsigned long ea, unsigned
 	DBG_LOW("hash_page(ea=%016lx, access=%lx, trap=%lx\n",
 		ea, access, trap);
 
+#ifndef CONFIG_BIGMEM
+	if ((ea & ~REGION_MASK) >= PGTABLE_RANGE) {
+		DBG_LOW(" out of pgtable range !\n");
+ 		return 1;
+	}
+
+#endif
 	/* Get region & vsid */
  	switch (REGION_ID(ea)) {
 	case USER_REGION_ID:
@@ -951,11 +1021,13 @@ int hash_page(unsigned long ea, unsigned
 	}
 	DBG_LOW(" mm=%p, mm->pgdir=%p, vsid=%016lx\n", mm, mm->pgd, vsid);
 
+#ifdef CONFIG_BIGMEM
 	/* Bad address. */
 	if (!vsid) {
 		DBG_LOW("Bad address!\n");
 		return 1;
 	}
+#endif
 	/* Get pgdir */
 	pgdir = mm->pgd;
 	if (pgdir == NULL)
@@ -1135,8 +1207,10 @@ void hash_preload(struct mm_struct *mm,
 	/* Get VSID */
 	ssize = user_segment_size(ea);
 	vsid = get_vsid(mm->context.id, ea, ssize);
+#ifdef CONFIG_BIGMEM
 	if (!vsid)
 		return;
+#endif
 
 	/* Hash doesn't like irqs */
 	local_irq_save(flags);
@@ -1167,21 +1241,35 @@ void hash_preload(struct mm_struct *mm,
 /* WARNING: This is called from hash_low_64.S, if you change this prototype,
  *          do not forget to update the assembly call site !
  */
+#ifndef CONFIG_BIGMEM
+void flush_hash_page(unsigned long va, real_pte_t pte, int psize, int ssize,
+#else
 void flush_hash_page(unsigned long vpn, real_pte_t pte, int psize, int ssize,
+#endif
 		     int local)
 {
 	unsigned long hash, index, shift, hidx, slot;
 
+#ifndef CONFIG_BIGMEM
+	DBG_LOW("flush_hash_page(va=%016lx)\n", va);
+	pte_iterate_hashed_subpages(pte, psize, va, index, shift) {
+		hash = hpt_hash(va, shift, ssize);
+#else
 	DBG_LOW("flush_hash_page(vpn=%016lx)\n", vpn);
 	pte_iterate_hashed_subpages(pte, psize, vpn, index, shift) {
 		hash = hpt_hash(vpn, shift, ssize);
+#endif
 		hidx = __rpte_to_hidx(pte, index);
 		if (hidx & _PTEIDX_SECONDARY)
 			hash = ~hash;
 		slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
 		slot += hidx & _PTEIDX_GROUP_IX;
 		DBG_LOW(" sub %ld: hash=%lx, hidx=%lx\n", index, slot, hidx);
+#ifndef CONFIG_BIGMEM
+		ppc_md.hpte_invalidate(slot, va, psize, ssize, local);
+#else
 		ppc_md.hpte_invalidate(slot, vpn, psize, ssize, local);
+#endif
 	} pte_iterate_hashed_end();
 }
 
@@ -1195,7 +1283,11 @@ void flush_hash_range(unsigned long numb
 			&__get_cpu_var(ppc64_tlb_batch);
 
 		for (i = 0; i < number; i++)
+#ifndef CONFIG_BIGMEM
+			flush_hash_page(batch->vaddr[i], batch->pte[i],
+#else
 			flush_hash_page(batch->vpn[i], batch->pte[i],
+#endif
 					batch->psize, batch->ssize, local);
 	}
 }
@@ -1222,17 +1314,29 @@ static void kernel_map_linear_page(unsig
 {
 	unsigned long hash, hpteg;
 	unsigned long vsid = get_kernel_vsid(vaddr, mmu_kernel_ssize);
+#ifndef CONFIG_BIGMEM
+	unsigned long va = hpt_va(vaddr, vsid, mmu_kernel_ssize);
+#else
 	unsigned long vpn = hpt_vpn(vaddr, vsid, mmu_kernel_ssize);
+#endif
 	unsigned long mode = htab_convert_pte_flags(PAGE_KERNEL);
 	int ret;
 
+#ifndef CONFIG_BIGMEM
+	hash = hpt_hash(va, PAGE_SHIFT, mmu_kernel_ssize);
+#else
 	hash = hpt_hash(vpn, PAGE_SHIFT, mmu_kernel_ssize);
+#endif
 	hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);
 
+#ifndef CONFIG_BIGMEM
+	ret = ppc_md.hpte_insert(hpteg, va, __pa(vaddr),
+#else
 	/* Don't create HPTE entries for bad address */
 	if (!vsid)
 		return;
 	ret = ppc_md.hpte_insert(hpteg, vpn, __pa(vaddr),
+#endif
 				 mode, HPTE_V_BOLTED,
 				 mmu_linear_psize, mmu_kernel_ssize);
 	BUG_ON (ret < 0);
@@ -1246,9 +1350,17 @@ static void kernel_unmap_linear_page(uns
 {
 	unsigned long hash, hidx, slot;
 	unsigned long vsid = get_kernel_vsid(vaddr, mmu_kernel_ssize);
+#ifndef CONFIG_BIGMEM
+	unsigned long va = hpt_va(vaddr, vsid, mmu_kernel_ssize);
+#else
 	unsigned long vpn = hpt_vpn(vaddr, vsid, mmu_kernel_ssize);
+#endif
 
+#ifndef CONFIG_BIGMEM
+	hash = hpt_hash(va, PAGE_SHIFT, mmu_kernel_ssize);
+#else
 	hash = hpt_hash(vpn, PAGE_SHIFT, mmu_kernel_ssize);
+#endif
 	spin_lock(&linear_map_hash_lock);
 	BUG_ON(!(linear_map_hash_slots[lmi] & 0x80));
 	hidx = linear_map_hash_slots[lmi] & 0x7f;
@@ -1258,7 +1370,11 @@ static void kernel_unmap_linear_page(uns
 		hash = ~hash;
 	slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
 	slot += hidx & _PTEIDX_GROUP_IX;
+#ifndef CONFIG_BIGMEM
+	ppc_md.hpte_invalidate(slot, va, mmu_linear_psize, mmu_kernel_ssize, 0);
+#else
 	ppc_md.hpte_invalidate(slot, vpn, mmu_linear_psize, mmu_kernel_ssize, 0);
+#endif
 }
 
 void kernel_map_pages(struct page *page, int numpages, int enable)
Only in a/arch/powerpc/mm: hash_utils_64.c.orig
diff -urp a/arch/powerpc/mm/hugetlbpage-hash64.c b/arch/powerpc/mm/hugetlbpage-hash64.c
--- a/arch/powerpc/mm/hugetlbpage-hash64.c
+++ b/arch/powerpc/mm/hugetlbpage-hash64.c
@@ -18,15 +18,25 @@ int __hash_page_huge(unsigned long ea, u
 		     pte_t *ptep, unsigned long trap, int local, int ssize,
 		     unsigned int shift, unsigned int mmu_psize)
 {
+#ifdef CONFIG_BIGMEM
 	unsigned long vpn;
+#endif
 	unsigned long old_pte, new_pte;
+#ifndef CONFIG_BIGMEM
+	unsigned long va, rflags, pa, sz;
+#else
 	unsigned long rflags, pa, sz;
+#endif
 	long slot;
 
 	BUG_ON(shift != mmu_psize_defs[mmu_psize].shift);
 
 	/* Search the Linux page table for a match with va */
+#ifndef CONFIG_BIGMEM
+	va = hpt_va(ea, vsid, ssize);
+#else
 	vpn = hpt_vpn(ea, vsid, ssize);
+#endif
 
 	/* At this point, we have a pte (old_pte) which can be used to build
 	 * or update an HPTE. There are 2 cases:
@@ -70,19 +80,31 @@ int __hash_page_huge(unsigned long ea, u
 		/* There MIGHT be an HPTE for this pte */
 		unsigned long hash, slot;
 
+#ifndef CONFIG_BIGMEM
+		hash = hpt_hash(va, shift, ssize);
+#else
 		hash = hpt_hash(vpn, shift, ssize);
+#endif
 		if (old_pte & _PAGE_F_SECOND)
 			hash = ~hash;
 		slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
 		slot += (old_pte & _PAGE_F_GIX) >> 12;
 
+#ifndef CONFIG_BIGMEM
+		if (ppc_md.hpte_updatepp(slot, rflags, va, mmu_psize,
+#else
 		if (ppc_md.hpte_updatepp(slot, rflags, vpn, mmu_psize,
+#endif
 					 ssize, local) == -1)
 			old_pte &= ~_PAGE_HPTEFLAGS;
 	}
 
 	if (likely(!(old_pte & _PAGE_HASHPTE))) {
+#ifndef CONFIG_BIGMEM
+		unsigned long hash = hpt_hash(va, shift, ssize);
+#else
 		unsigned long hash = hpt_hash(vpn, shift, ssize);
+#endif
 		unsigned long hpte_group;
 
 		pa = pte_pfn(__pte(old_pte)) << PAGE_SHIFT;
@@ -102,14 +124,22 @@ repeat:
 				      _PAGE_COHERENT | _PAGE_GUARDED));
 
 		/* Insert into the hash table, primary slot */
+#ifndef CONFIG_BIGMEM
+		slot = ppc_md.hpte_insert(hpte_group, va, pa, rflags, 0,
+#else
 		slot = ppc_md.hpte_insert(hpte_group, vpn, pa, rflags, 0,
+#endif
 					  mmu_psize, ssize);
 
 		/* Primary is full, try the secondary */
 		if (unlikely(slot == -1)) {
 			hpte_group = ((~hash & htab_hash_mask) *
 				      HPTES_PER_GROUP) & ~0x7UL;
+#ifndef CONFIG_BIGMEM
+			slot = ppc_md.hpte_insert(hpte_group, va, pa, rflags,
+#else
 			slot = ppc_md.hpte_insert(hpte_group, vpn, pa, rflags,
+#endif
 						  HPTE_V_SECONDARY,
 						  mmu_psize, ssize);
 			if (slot == -1) {
diff -urp a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -72,6 +72,12 @@
 #if PGTABLE_RANGE > USER_VSID_RANGE
 #warning Limited user VSID range means pagetable space is wasted
 #endif
+#ifndef CONFIG_BIGMEM
+
+#if (TASK_SIZE_USER64 < PGTABLE_RANGE) && (TASK_SIZE_USER64 < USER_VSID_RANGE)
+#warning TASK_SIZE is smaller than it needs to be.
+#endif
+#endif
 #endif /* CONFIG_PPC_STD_MMU_64 */
 
 phys_addr_t memstart_addr = ~0;
diff -urp a/arch/powerpc/mm/mmu_context_hash64.c b/arch/powerpc/mm/mmu_context_hash64.c
--- a/arch/powerpc/mm/mmu_context_hash64.c
+++ b/arch/powerpc/mm/mmu_context_hash64.c
@@ -222,18 +222,33 @@ EXPORT_SYMBOL_GPL(drop_cop);
 static DEFINE_SPINLOCK(mmu_context_lock);
 static DEFINE_IDA(mmu_context_ida);
 
+#ifndef CONFIG_BIGMEM
+/*
+ * The proto-VSID space has 2^35 - 1 segments available for user mappings.
+ * Each segment contains 2^28 bytes.  Each context maps 2^44 bytes,
+ * so we can support 2^19-1 contexts (19 == 35 + 28 - 44).
+ */
+#define MAX_CONTEXT	((1UL << 19) - 1)
+
+#endif
 int __init_new_context(void)
 {
 	int index;
 	int err;
+#ifdef CONFIG_BIGMEM
 	unsigned long max;
+#endif
 
 again:
 	if (!ida_pre_get(&mmu_context_ida, GFP_KERNEL))
 		return -ENOMEM;
 
 	spin_lock(&mmu_context_lock);
+#ifndef CONFIG_BIGMEM
+	err = ida_get_new_above(&mmu_context_ida, 1, &index);
+#else
 	err = ida_get_new_above(&mmu_context_ida, MIN_USER_CONTEXT, &index);
+#endif
 	spin_unlock(&mmu_context_lock);
 
 	if (err == -EAGAIN)
@@ -241,12 +256,16 @@ again:
 	else if (err)
 		return err;
 
+#ifndef CONFIG_BIGMEM
+	if (index > MAX_CONTEXT) {
+#else
 	if (mmu_has_feature(MMU_FTR_68_BIT_VA))
 		max = MAX_USER_CONTEXT;
 	else
 		max = MAX_USER_CONTEXT_65BIT_VA;
 
 	if (index > max) {
+#endif
 		spin_lock(&mmu_context_lock);
 		ida_remove(&mmu_context_ida, index);
 		spin_unlock(&mmu_context_lock);
Only in a/arch/powerpc/mm: mmu_context_hash64.c.orig
diff -urp a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -56,6 +56,9 @@
 
 #include "mmu_decl.h"
 
+#ifndef CONFIG_BIGMEM
+unsigned long ioremap_bot = IOREMAP_BASE;
+#else
 /* Some sanity checking */
 #if TASK_SIZE_USER64 > PGTABLE_RANGE
 #error TASK_SIZE_USER64 exceeds pagetable range
@@ -66,8 +69,11 @@
 #error TASK_SIZE_USER64 exceeds user VSID range
 #endif
 #endif
+#endif
 
+#ifdef CONFIG_BIGMEM
 unsigned long ioremap_bot = IOREMAP_BASE;
+#endif
 
 #ifdef CONFIG_PPC_MMU_NOHASH
 static void *early_alloc_pgtable(unsigned long size)
diff -urp a/arch/powerpc/mm/slb_low.S b/arch/powerpc/mm/slb_low.S
--- a/arch/powerpc/mm/slb_low.S
+++ b/arch/powerpc/mm/slb_low.S
@@ -23,6 +23,7 @@
 #include <asm/pgtable.h>
 #include <asm/firmware.h>
 
+#ifdef CONFIG_BIGMEM
 /*
  * This macro generates asm code to compute the VSID scramble
  * function.  Used in slb_allocate() and do_stab_bolted.  The function
@@ -65,6 +66,7 @@ MMU_FTR_SECTION_ELSE							\
 ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_68_BIT_VA)
 
 
+#endif
 /* void slb_allocate_realmode(unsigned long ea);
  *
  * Create an SLB entry for the given EA (user or kernel).
@@ -73,20 +75,37 @@ ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_68
  * No other registers are examined or changed.
  */
 _GLOBAL(slb_allocate_realmode)
+#ifndef CONFIG_BIGMEM
+	/* r3 = faulting address */
+#else
 	/*
 	 * check for bad kernel/user address
 	 * (ea & ~REGION_MASK) >= PGTABLE_RANGE
 	 */
 	rldicr. r9,r3,4,(63 - PGTABLE_EADDR_SIZE - 4)
 	bne-	8f
+#endif
 
 	srdi	r9,r3,60		/* get region */
+#ifndef CONFIG_BIGMEM
+	srdi	r10,r3,28		/* get esid */
+#else
 	srdi	r10,r3,SID_SHIFT	/* get esid */
+#endif
 	cmpldi	cr7,r9,0xc		/* cmp PAGE_OFFSET for later use */
 
 	/* r3 = address, r10 = esid, cr7 = <> PAGE_OFFSET */
 	blt	cr7,0f			/* user or kernel? */
 
+#ifndef CONFIG_BIGMEM
+	/* kernel address: proto-VSID = ESID */
+	/* WARNING - MAGIC: we don't use the VSID 0xfffffffff, but
+	 * this code will generate the protoVSID 0xfffffffff for the
+	 * top segment.  That's ok, the scramble below will translate
+	 * it to VSID 0, which is reserved as a bad VSID - one which
+	 * will never have any pages in it.  */
+
+#endif
 	/* Check if hitting the linear mapping or some other kernel space
 	*/
 	bne	cr7,1f
@@ -96,12 +115,14 @@ _GLOBAL(slb_allocate_realmode)
 	 */
 _GLOBAL(slb_miss_kernel_load_linear)
 	li	r11,0
+#ifdef CONFIG_BIGMEM
 	/*
 	 * context = (ea >> 60) - (0xc - 1)
 	 * r9 = region id.
 	 */
 	subi	r9,r9,KERNEL_REGION_CONTEXT_OFFSET
 
+#endif
 BEGIN_FTR_SECTION
 	b	slb_finish_load
 END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT)
@@ -131,22 +152,33 @@ _GLOBAL(slb_miss_kernel_load_vmemmap)
 	_GLOBAL(slb_miss_kernel_load_io)
 	li	r11,0
 6:
+#ifdef CONFIG_BIGMEM
 	/*
 	 * context = (ea >> 60) - (0xc - 1)
 	 * r9 = region id.
 	 */
 	subi	r9,r9,KERNEL_REGION_CONTEXT_OFFSET
 
+#endif
 BEGIN_FTR_SECTION
 	b	slb_finish_load
 END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT)
 	b	slb_finish_load_1T
 
+#ifndef CONFIG_BIGMEM
+0:	/* user address: proto-VSID = context << 15 | ESID. First check
+	 * if the address is within the boundaries of the user region
+	 */
+	srdi.	r9,r10,USER_ESID_BITS
+	bne-	8f			/* invalid ea bits set */
+
+#else
 0:	/*.
 	 * For userspace addresses, make sure this is region 0.
 	.*/
 	cmpdi	r9, 0
 	bne	8f
+#endif
 
 	/* when using slices, we extract the psize off the slice bitmaps
 	 * and then we need to get the sllp encoding off the mmu_psize_defs
@@ -159,10 +191,24 @@ END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEG
 	 * between 4k and 64k standard page size
 	 */
 #ifdef CONFIG_PPC_MM_SLICES
+#ifdef CONFIG_BIGMEM
 	/* r10 have esid */
+#endif
 	cmpldi	r10,16
+#ifndef CONFIG_BIGMEM
+
+	/* Get the slice index * 4 in r11 and matching slice size mask in r9 */
+	ld	r9,PACALOWSLICESPSIZE(r13)
+	sldi	r11,r10,2
+#else
 	/* below SLICE_LOW_TOP */
+#endif
 	blt	5f
+#ifndef CONFIG_BIGMEM
+	ld	r9,PACAHIGHSLICEPSIZE(r13)
+	srdi	r11,r10,(SLICE_HIGH_SHIFT - SLICE_LOW_SHIFT - 2)
+	andi.	r11,r11,0x3c
+#else
 	/*
 	 * Handle hpsizes,
 	 * r9 is get_paca()->context.high_slices_psize[index], r11 is mask_index
@@ -173,7 +219,11 @@ END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEG
 	/* r11 = (r10 >> (SLICE_HIGH_SHIFT - SLICE_LOW_SHIFT)) & 0x1 */
 	rldicl	r11,r10,(64 - (SLICE_HIGH_SHIFT - SLICE_LOW_SHIFT)),63
 	b	6f
+#endif
 
+#ifndef CONFIG_BIGMEM
+5:	/* Extract the psize and multiply to get an array offset */
+#else
 5:
 	/*
 	 * Handle lpsizes
@@ -184,6 +234,7 @@ END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEG
 6:
 	sldi	r11,r11,2  /* index * 4 */
 	/* Extract the psize and multiply to get an array offset */
+#endif
 	srd	r9,r9,r11
 	andi.	r9,r9,0xf
 	mulli	r9,r9,MMUPSIZEDEFSIZE
@@ -203,13 +254,20 @@ END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEG
 	ld	r9,PACACONTEXTID(r13)
 BEGIN_FTR_SECTION
 	cmpldi	r10,0x1000
+#ifndef CONFIG_BIGMEM
+END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
+	rldimi	r10,r9,USER_ESID_BITS,0
+BEGIN_FTR_SECTION
+#endif
 	bge	slb_finish_load_1T
 END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
 	b	slb_finish_load
 
 8:	/* invalid EA */
 	li	r10,0			/* BAD_VSID */
+#ifdef CONFIG_BIGMEM
 	li	r9,0			/* BAD_VSID */
+#endif
 	li	r11,SLB_VSID_USER	/* flags don't much matter */
 	b	slb_finish_load
 
@@ -258,6 +316,10 @@ _GLOBAL(slb_allocate_user)
 
 	/* get context to calculate proto-VSID */
 	ld	r9,PACACONTEXTID(r13)
+#ifndef CONFIG_BIGMEM
+	rldimi	r10,r9,USER_ESID_BITS,0
+
+#endif
 	/* fall through slb_finish_load */
 
 #endif /* __DISABLED__ */
@@ -266,11 +328,21 @@ _GLOBAL(slb_allocate_user)
 /*
  * Finish loading of an SLB entry and return
  *
+#ifndef CONFIG_BIGMEM
+ * r3 = EA, r10 = proto-VSID, r11 = flags, clobbers r9, cr7 = <> PAGE_OFFSET
+#else
  * r3 = EA, r9 = context, r10 = ESID, r11 = flags, clobbers r9, cr7 = <> PAGE_OFFSET
+#endif
  */
 slb_finish_load:
+#ifndef CONFIG_BIGMEM
+	ASM_VSID_SCRAMBLE(r10,r9,256M)
+	rldimi	r11,r10,SLB_VSID_SHIFT,16	/* combine VSID and flags */
+
+#else
 	rldimi  r10,r9,ESID_BITS,0
 	ASM_VSID_SCRAMBLE(r10,r9,r11,256M)
+#endif
 	/* r3 = EA, r11 = VSID data */
 	/*
 	 * Find a slot, round robin. Previously we tried to find a
@@ -327,10 +399,17 @@ _GLOBAL(slb_compare_rr_to_size)
 	bge	1f
 
 	/* still room in the slb cache */
+#ifndef CONFIG_BIGMEM
+	sldi	r11,r3,1		/* r11 = offset * sizeof(u16) */
+	rldicl	r10,r10,36,28		/* get low 16 bits of the ESID */
+	add	r11,r11,r13		/* r11 = (u16 *)paca + offset */
+	sth	r10,PACASLBCACHE(r11)	/* paca->slb_cache[offset] = esid */
+#else
 	sldi	r11,r3,2		/* r11 = offset * sizeof(u32) */
 	srdi    r10,r10,28		/* get the 36 bits of the ESID */
 	add	r11,r11,r13		/* r11 = (u32 *)paca + offset */
 	stw	r10,PACASLBCACHE(r11)	/* paca->slb_cache[offset] = esid */
+#endif
 	addi	r3,r3,1			/* offset++ */
 	b	2f
 1:					/* offset >= SLB_CACHE_ENTRIES */
@@ -344,13 +423,23 @@ _GLOBAL(slb_compare_rr_to_size)
  * Finish loading of a 1T SLB entry (for the kernel linear mapping) and return.
  * We assume legacy iSeries will never have 1T segments.
  *
+#ifndef CONFIG_BIGMEM
+ * r3 = EA, r10 = proto-VSID, r11 = flags, clobbers r9
+#else
  * r3 = EA, r9 = context, r10 = ESID(256MB), r11 = flags, clobbers r9
+#endif
  */
 slb_finish_load_1T:
+#ifndef CONFIG_BIGMEM
+	srdi	r10,r10,40-28		/* get 1T ESID */
+	ASM_VSID_SCRAMBLE(r10,r9,1T)
+	rldimi	r11,r10,SLB_VSID_SHIFT_1T,16	/* combine VSID and flags */
+#else
 	srdi	r10,r10,(SID_SHIFT_1T - SID_SHIFT)	/* get 1T ESID */
 	rldimi  r10,r9,ESID_BITS_1T,0
 	ASM_VSID_SCRAMBLE(r10,r9,r11,1T)
 
+#endif
 	li	r10,MMU_SEGSIZE_1T
 	rldimi	r11,r10,SLB_VSID_SSIZE_SHIFT,0	/* insert segment size */
 
diff -urp a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
--- a/arch/powerpc/mm/slice.c
+++ b/arch/powerpc/mm/slice.c
@@ -35,6 +35,9 @@
 #include <asm/spu.h>
 
 static DEFINE_SPINLOCK(slice_convert_lock);
+#ifndef CONFIG_BIGMEM
+
+#else
 /*
  * One bit per slice. We have lower slices which cover 256MB segments
  * upto 4G range. That gets us 16 low slices. For the rest we track slices
@@ -44,19 +47,43 @@ struct slice_mask {
 	u64 low_slices;
 	DECLARE_BITMAP(high_slices, SLICE_NUM_HIGH);
 };
+#endif
 
 #ifdef DEBUG
 int _slice_debug = 1;
 
 static void slice_print_mask(const char *label, struct slice_mask mask)
 {
+#ifndef CONFIG_BIGMEM
+	char	*p, buf[16 + 3 + 16 + 1];
+	int	i;
+
+#endif
 	if (!_slice_debug)
 		return;
+#ifndef CONFIG_BIGMEM
+	p = buf;
+	for (i = 0; i < SLICE_NUM_LOW; i++)
+		*(p++) = (mask.low_slices & (1 << i)) ? '1' : '0';
+	*(p++) = ' ';
+	*(p++) = '-';
+	*(p++) = ' ';
+	for (i = 0; i < SLICE_NUM_HIGH; i++)
+		*(p++) = (mask.high_slices & (1 << i)) ? '1' : '0';
+	*(p++) = 0;
+
+	printk(KERN_DEBUG "%s:%s\n", label, buf);
+#else
 	pr_devel("%s low_slice: %*pbl\n", label, (int)SLICE_NUM_LOW, &mask.low_slices);
 	pr_devel("%s high_slice: %*pbl\n", label, (int)SLICE_NUM_HIGH, mask.high_slices);
+#endif
 }
 
+#ifndef CONFIG_BIGMEM
+#define slice_dbg(fmt...) do { if (_slice_debug) pr_debug(fmt); } while(0)
+#else
 #define slice_dbg(fmt...) do { if (_slice_debug) pr_devel(fmt); } while (0)
+#endif
 
 #else
 
@@ -65,28 +92,57 @@ static void slice_print_mask(const char
 
 #endif
 
+#ifndef CONFIG_BIGMEM
+static struct slice_mask slice_range_to_mask(unsigned long start,
+					     unsigned long len)
+#else
 static void slice_range_to_mask(unsigned long start, unsigned long len,
 				struct slice_mask *ret)
+#endif
 {
 	unsigned long end = start + len - 1;
+#ifndef CONFIG_BIGMEM
+	struct slice_mask ret = { 0, 0 };
+#else
 
 	ret->low_slices = 0;
 	bitmap_zero(ret->high_slices, SLICE_NUM_HIGH);
+#endif
 
 	if (start < SLICE_LOW_TOP) {
+#ifndef CONFIG_BIGMEM
+		unsigned long mend = min(end, SLICE_LOW_TOP);
+		unsigned long mstart = min(start, SLICE_LOW_TOP);
+#else
 		unsigned long mend = min(end, (SLICE_LOW_TOP - 1));
+#endif
 
+#ifndef CONFIG_BIGMEM
+		ret.low_slices = (1u << (GET_LOW_SLICE_INDEX(mend) + 1))
+			- (1u << GET_LOW_SLICE_INDEX(mstart));
+#else
 		ret->low_slices = (1u << (GET_LOW_SLICE_INDEX(mend) + 1))
 			- (1u << GET_LOW_SLICE_INDEX(start));
+#endif
 	}
 
+#ifndef CONFIG_BIGMEM
+	if ((start + len) > SLICE_LOW_TOP)
+		ret.high_slices = (1u << (GET_HIGH_SLICE_INDEX(end) + 1))
+			- (1u << GET_HIGH_SLICE_INDEX(start));
+#else
 	if ((start + len) > SLICE_LOW_TOP) {
 		unsigned long start_index = GET_HIGH_SLICE_INDEX(start);
 		unsigned long align_end = ALIGN(end, (1UL << SLICE_HIGH_SHIFT));
 		unsigned long count = GET_HIGH_SLICE_INDEX(align_end) - start_index;
+#endif
 
+#ifndef CONFIG_BIGMEM
+	return ret;
+#else
 		bitmap_set(ret->high_slices, start_index, count);
 	}
+#endif
 }
 
 static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
@@ -120,40 +176,92 @@ static int slice_high_has_vma(struct mm_
 	return !slice_area_is_free(mm, start, end - start);
 }
 
+#ifndef CONFIG_BIGMEM
+static struct slice_mask slice_mask_for_free(struct mm_struct *mm)
+#else
 static void slice_mask_for_free(struct mm_struct *mm, struct slice_mask *ret)
+#endif
 {
+#ifndef CONFIG_BIGMEM
+	struct slice_mask ret = { 0, 0 };
+#endif
 	unsigned long i;
 
+#ifdef CONFIG_BIGMEM
 	ret->low_slices = 0;
 	bitmap_zero(ret->high_slices, SLICE_NUM_HIGH);
 
+#endif
 	for (i = 0; i < SLICE_NUM_LOW; i++)
 		if (!slice_low_has_vma(mm, i))
+#ifndef CONFIG_BIGMEM
+			ret.low_slices |= 1u << i;
+#else
 			ret->low_slices |= 1u << i;
+#endif
 
 	if (mm->task_size <= SLICE_LOW_TOP)
+#ifndef CONFIG_BIGMEM
+		return ret;
+#else
 		return;
+#endif
 
 	for (i = 0; i < SLICE_NUM_HIGH; i++)
 		if (!slice_high_has_vma(mm, i))
+#ifndef CONFIG_BIGMEM
+			ret.high_slices |= 1u << i;
+
+	return ret;
+#else
 			__set_bit(i, ret->high_slices);
+#endif
 }
 
+#ifndef CONFIG_BIGMEM
+static struct slice_mask slice_mask_for_size(struct mm_struct *mm, int psize)
+#else
 static void slice_mask_for_size(struct mm_struct *mm, int psize, struct slice_mask *ret)
+#endif
 {
+#ifndef CONFIG_BIGMEM
+	struct slice_mask ret = { 0, 0 };
+#else
 	unsigned char *hpsizes;
 	int index, mask_index;
+#endif
 	unsigned long i;
+#ifndef CONFIG_BIGMEM
+	u64 psizes;
+#else
 	u64 lpsizes;
+#endif
 
+#ifndef CONFIG_BIGMEM
+	psizes = mm->context.low_slices_psize;
+	for (i = 0; i < SLICE_NUM_LOW; i++)
+		if (((psizes >> (i * 4)) & 0xf) == psize)
+			ret.low_slices |= 1u << i;
+#else
 	ret->low_slices = 0;
 	bitmap_zero(ret->high_slices, SLICE_NUM_HIGH);
+#endif
 
+#ifndef CONFIG_BIGMEM
+	psizes = mm->context.high_slices_psize;
+	for (i = 0; i < SLICE_NUM_HIGH; i++)
+		if (((psizes >> (i * 4)) & 0xf) == psize)
+			ret.high_slices |= 1u << i;
+#else
 	lpsizes = mm->context.low_slices_psize;
 	for (i = 0; i < SLICE_NUM_LOW; i++)
 		if (((lpsizes >> (i * 4)) & 0xf) == psize)
 			ret->low_slices |= 1u << i;
+#endif
 
+#ifndef CONFIG_BIGMEM
+	return ret;
+#else
 	hpsizes = mm->context.high_slices_psize;
 	for (i = 0; i < SLICE_NUM_HIGH; i++) {
 		mask_index = i & 0x1;
@@ -161,17 +269,24 @@ static void slice_mask_for_size(struct m
 		if (((hpsizes[index] >> (mask_index * 4)) & 0xf) == psize)
 			__set_bit(i, ret->high_slices);
 	}
+#endif
 }
 
 static int slice_check_fit(struct slice_mask mask, struct slice_mask available)
 {
+#ifdef CONFIG_BIGMEM
 	DECLARE_BITMAP(result, SLICE_NUM_HIGH);
 
 	bitmap_and(result, mask.high_slices,
 		   available.high_slices, SLICE_NUM_HIGH);
 
+#endif
 	return (mask.low_slices & available.low_slices) == mask.low_slices &&
+#ifndef CONFIG_BIGMEM
+		(mask.high_slices & available.high_slices) == mask.high_slices;
+#else
 		bitmap_equal(result, mask.high_slices, SLICE_NUM_HIGH);
+#endif
 }
 
 static void slice_flush_segments(void *parm)
@@ -192,10 +307,16 @@ static void slice_flush_segments(void *p
 
 static void slice_convert(struct mm_struct *mm, struct slice_mask mask, int psize)
 {
+#ifdef CONFIG_BIGMEM
 	int index, mask_index;
+#endif
 	/* Write the new slice psize bits */
+#ifndef CONFIG_BIGMEM
+	u64 lpsizes, hpsizes;
+#else
 	unsigned char *hpsizes;
 	u64 lpsizes;
+#endif
 	unsigned long i, flags;
 
 	slice_dbg("slice_convert(mm=%p, psize=%d)\n", mm, psize);
@@ -212,8 +333,20 @@ static void slice_convert(struct mm_stru
 			lpsizes = (lpsizes & ~(0xful << (i * 4))) |
 				(((unsigned long)psize) << (i * 4));
 
+#ifndef CONFIG_BIGMEM
+	hpsizes = mm->context.high_slices_psize;
+	for (i = 0; i < SLICE_NUM_HIGH; i++)
+		if (mask.high_slices & (1u << i))
+			hpsizes = (hpsizes & ~(0xful << (i * 4))) |
+				(((unsigned long)psize) << (i * 4));
+
+#else
 	/* Assign the value back */
+#endif
 	mm->context.low_slices_psize = lpsizes;
+#ifndef CONFIG_BIGMEM
+	mm->context.high_slices_psize = hpsizes;
+#else
 
 	hpsizes = mm->context.high_slices_psize;
 	for (i = 0; i < SLICE_NUM_HIGH; i++) {
@@ -224,10 +357,16 @@ static void slice_convert(struct mm_stru
 					  ~(0xf << (mask_index * 4))) |
 				(((unsigned long)psize) << (mask_index * 4));
 	}
+#endif
 
 	slice_dbg(" lsps=%lx, hsps=%lx\n",
+#ifndef CONFIG_BIGMEM
+		  mm->context.low_slices_psize,
+		  mm->context.high_slices_psize);
+#else
 		  (unsigned long)mm->context.low_slices_psize,
 		  (unsigned long)mm->context.high_slices_psize);
+#endif
 
 	spin_unlock_irqrestore(&slice_convert_lock, flags);
 
@@ -263,7 +402,11 @@ full_search:
 		vma = find_vma(mm, addr);
 		BUG_ON(vma && (addr >= vma->vm_end));
 
+#ifndef CONFIG_BIGMEM
+		mask = slice_range_to_mask(addr, len);
+#else
 		slice_range_to_mask(addr, len, &mask);
+#endif
 		if (!slice_check_fit(mask, available)) {
 			if (addr < SLICE_LOW_TOP)
 				addr = _ALIGN_UP(addr + 1,  1ul << SLICE_LOW_SHIFT);
@@ -318,7 +461,11 @@ static unsigned long slice_find_area_top
 		/* make sure it can fit in the remaining address space */
 		if (addr > len) {
 			addr = _ALIGN_DOWN(addr - len, 1ul << pshift);
+#ifndef CONFIG_BIGMEM
+			mask = slice_range_to_mask(addr, len);
+#else
 			slice_range_to_mask(addr, len, &mask);
+#endif
 			if (slice_check_fit(mask, available) &&
 			    slice_area_is_free(mm, addr, len))
 					/* remember the address as a hint for
@@ -334,7 +481,11 @@ static unsigned long slice_find_area_top
 		addr = _ALIGN_DOWN(addr - len, 1ul << pshift);
 
 		/* Check for hit with different page size */
+#ifndef CONFIG_BIGMEM
+		mask = slice_range_to_mask(addr, len);
+#else
 		slice_range_to_mask(addr, len, &mask);
+#endif
 		if (!slice_check_fit(mask, available)) {
 			if (addr < SLICE_LOW_TOP)
 				addr = _ALIGN_DOWN(addr, 1ul << SLICE_LOW_SHIFT);
@@ -396,10 +547,23 @@ static unsigned long slice_find_area(str
 		return slice_find_area_bottomup(mm, len, mask, psize, use_cache);
 }
 
+#ifndef CONFIG_BIGMEM
+#define or_mask(dst, src)	do {			\
+	(dst).low_slices |= (src).low_slices;		\
+	(dst).high_slices |= (src).high_slices;		\
+} while (0)
+#else
 static inline void slice_or_mask(struct slice_mask *dst, struct slice_mask *src)
 {
 	DECLARE_BITMAP(result, SLICE_NUM_HIGH);
+#endif
 
+#ifndef CONFIG_BIGMEM
+#define andnot_mask(dst, src)	do {			\
+	(dst).low_slices &= ~(src).low_slices;		\
+	(dst).high_slices &= ~(src).high_slices;	\
+} while (0)
+#else
 	dst->low_slices |= src->low_slices;
 	bitmap_or(result, dst->high_slices, src->high_slices, SLICE_NUM_HIGH);
 	bitmap_copy(dst->high_slices, result, SLICE_NUM_HIGH);
@@ -414,6 +578,7 @@ static inline void slice_andnot_mask(str
 	bitmap_andnot(result, dst->high_slices, src->high_slices, SLICE_NUM_HIGH);
 	bitmap_copy(dst->high_slices, result, SLICE_NUM_HIGH);
 }
+#endif
 
 #ifdef CONFIG_PPC_64K_PAGES
 #define MMU_PAGE_BASE	MMU_PAGE_64K
@@ -425,15 +590,25 @@ unsigned long slice_get_unmapped_area(un
 				      unsigned long flags, unsigned int psize,
 				      int topdown, int use_cache)
 {
+#ifndef CONFIG_BIGMEM
+	struct slice_mask mask = {0, 0};
+#else
 	struct slice_mask mask;
+#endif
 	struct slice_mask good_mask;
+#ifndef CONFIG_BIGMEM
+	struct slice_mask potential_mask = {0,0} /* silence stupid warning */;
+	struct slice_mask compat_mask = {0, 0};
+#else
 	struct slice_mask potential_mask;
 	struct slice_mask compat_mask;
+#endif
 	int fixed = (flags & MAP_FIXED);
 	int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT);
 	struct mm_struct *mm = current->mm;
 	unsigned long newaddr;
 
+#ifdef CONFIG_BIGMEM
 	/*
 	 * init different masks
 	 */
@@ -447,6 +622,7 @@ unsigned long slice_get_unmapped_area(un
 	compat_mask.low_slices = 0;
 	bitmap_zero(compat_mask.high_slices, SLICE_NUM_HIGH);
 
+#endif
 	/* Sanity checks */
 	BUG_ON(mm->task_size == 0);
 
@@ -476,7 +652,11 @@ unsigned long slice_get_unmapped_area(un
 	/* First make up a "good" mask of slices that have the right size
 	 * already
 	 */
+#ifndef CONFIG_BIGMEM
+	good_mask = slice_mask_for_size(mm, psize);
+#else
 	slice_mask_for_size(mm, psize, &good_mask);
+#endif
 	slice_print_mask(" good_mask", good_mask);
 
 	/*
@@ -501,16 +681,28 @@ unsigned long slice_get_unmapped_area(un
 #ifdef CONFIG_PPC_64K_PAGES
 	/* If we support combo pages, we can allow 64k pages in 4k slices */
 	if (psize == MMU_PAGE_64K) {
+#ifndef CONFIG_BIGMEM
+		compat_mask = slice_mask_for_size(mm, MMU_PAGE_4K);
+#else
 		slice_mask_for_size(mm, MMU_PAGE_4K, &compat_mask);
+#endif
 		if (fixed)
+#ifndef CONFIG_BIGMEM
+			or_mask(good_mask, compat_mask);
+#else
 			slice_or_mask(&good_mask, &compat_mask);
+#endif
 	}
 #endif
 
 	/* First check hint if it's valid or if we have MAP_FIXED */
 	if (addr != 0 || fixed) {
 		/* Build a mask for the requested range */
+#ifndef CONFIG_BIGMEM
+		mask = slice_range_to_mask(addr, len);
+#else
 		slice_range_to_mask(addr, len, &mask);
+#endif
 		slice_print_mask(" mask", mask);
 
 		/* Check if we fit in the good mask. If we do, we just return,
@@ -538,8 +730,13 @@ unsigned long slice_get_unmapped_area(un
 	/* We don't fit in the good mask, check what other slices are
 	 * empty and thus can be converted
 	 */
+#ifndef CONFIG_BIGMEM
+	potential_mask = slice_mask_for_free(mm);
+	or_mask(potential_mask, good_mask);
+#else
 	slice_mask_for_free(mm, &potential_mask);
 	slice_or_mask(&potential_mask, &good_mask);
+#endif
 	slice_print_mask(" potential", potential_mask);
 
 	if ((addr != 0 || fixed) && slice_check_fit(mask, potential_mask)) {
@@ -574,7 +771,11 @@ unsigned long slice_get_unmapped_area(un
 #ifdef CONFIG_PPC_64K_PAGES
 	if (addr == -ENOMEM && psize == MMU_PAGE_64K) {
 		/* retry the search with 4k-page slices included */
+#ifndef CONFIG_BIGMEM
+		or_mask(potential_mask, compat_mask);
+#else
 		slice_or_mask(&potential_mask, &compat_mask);
+#endif
 		addr = slice_find_area(mm, len, potential_mask, psize,
 				       topdown, use_cache);
 	}
@@ -583,14 +784,24 @@ unsigned long slice_get_unmapped_area(un
 	if (addr == -ENOMEM)
 		return -ENOMEM;
 
+#ifndef CONFIG_BIGMEM
+	mask = slice_range_to_mask(addr, len);
+#else
 	slice_range_to_mask(addr, len, &mask);
+#endif
 	slice_dbg(" found potential area at 0x%lx\n", addr);
 	slice_print_mask(" mask", mask);
 
  convert:
+#ifndef CONFIG_BIGMEM
+	andnot_mask(mask, good_mask);
+	andnot_mask(mask, compat_mask);
+	if (mask.low_slices || mask.high_slices) {
+#else
 	slice_andnot_mask(&mask, &good_mask);
 	slice_andnot_mask(&mask, &compat_mask);
 	if (mask.low_slices || !bitmap_empty(mask.high_slices, SLICE_NUM_HIGH)) {
+#endif
 		slice_convert(mm, mask, psize);
 		if (psize > MMU_PAGE_BASE)
 			on_each_cpu(slice_flush_segments, mm, 1);
@@ -624,19 +835,39 @@ unsigned long arch_get_unmapped_area_top
 
 unsigned int get_slice_psize(struct mm_struct *mm, unsigned long addr)
 {
+#ifndef CONFIG_BIGMEM
+	u64 psizes;
+	int index;
+#else
 	unsigned char *hpsizes;
 	int index, mask_index;
+#endif
 
 	if (addr < SLICE_LOW_TOP) {
+#ifndef CONFIG_BIGMEM
+		psizes = mm->context.low_slices_psize;
+#else
 		u64 lpsizes;
 		lpsizes = mm->context.low_slices_psize;
+#endif
 		index = GET_LOW_SLICE_INDEX(addr);
+#ifndef CONFIG_BIGMEM
+	} else {
+		psizes = mm->context.high_slices_psize;
+		index = GET_HIGH_SLICE_INDEX(addr);
+#else
 		return (lpsizes >> (index * 4)) & 0xf;
+#endif
 	}
+#ifndef CONFIG_BIGMEM
+
+	return (psizes >> (index * 4)) & 0xf;
+#else
 	hpsizes = mm->context.high_slices_psize;
 	index = GET_HIGH_SLICE_INDEX(addr);
 	mask_index = index & 0x1;
 	return (hpsizes[index >> 1] >> (mask_index * 4)) & 0xf;
+#endif
 }
 EXPORT_SYMBOL_GPL(get_slice_psize);
 
@@ -656,9 +887,13 @@ EXPORT_SYMBOL_GPL(get_slice_psize);
  */
 void slice_set_user_psize(struct mm_struct *mm, unsigned int psize)
 {
+#ifndef CONFIG_BIGMEM
+	unsigned long flags, lpsizes, hpsizes;
+#else
 	int index, mask_index;
 	unsigned char *hpsizes;
 	unsigned long flags, lpsizes;
+#endif
 	unsigned int old_psize;
 	int i;
 
@@ -679,10 +914,18 @@ void slice_set_user_psize(struct mm_stru
 		if (((lpsizes >> (i * 4)) & 0xf) == old_psize)
 			lpsizes = (lpsizes & ~(0xful << (i * 4))) |
 				(((unsigned long)psize) << (i * 4));
+#ifdef CONFIG_BIGMEM
 	/* Assign the value back */
 	mm->context.low_slices_psize = lpsizes;
+#endif
 
 	hpsizes = mm->context.high_slices_psize;
+#ifndef CONFIG_BIGMEM
+	for (i = 0; i < SLICE_NUM_HIGH; i++)
+		if (((hpsizes >> (i * 4)) & 0xf) == old_psize)
+			hpsizes = (hpsizes & ~(0xful << (i * 4))) |
+				(((unsigned long)psize) << (i * 4));
+#else
 	for (i = 0; i < SLICE_NUM_HIGH; i++) {
 		mask_index = i & 0x1;
 		index = i >> 1;
@@ -693,11 +936,21 @@ void slice_set_user_psize(struct mm_stru
 	}
 
 
+#endif
 
+#ifndef CONFIG_BIGMEM
+	mm->context.low_slices_psize = lpsizes;
+	mm->context.high_slices_psize = hpsizes;
+#endif
 
 	slice_dbg(" lsps=%lx, hsps=%lx\n",
+#ifndef CONFIG_BIGMEM
+		  mm->context.low_slices_psize,
+		  mm->context.high_slices_psize);
+#else
 		  (unsigned long)mm->context.low_slices_psize,
 		  (unsigned long)mm->context.high_slices_psize);
+#endif
 
  bail:
 	spin_unlock_irqrestore(&slice_convert_lock, flags);
@@ -706,27 +959,47 @@ void slice_set_user_psize(struct mm_stru
 void slice_set_psize(struct mm_struct *mm, unsigned long address,
 		     unsigned int psize)
 {
+#ifdef CONFIG_BIGMEM
 	unsigned char *hpsizes;
+#endif
 	unsigned long i, flags;
+#ifndef CONFIG_BIGMEM
+	u64 *p;
+#else
 	u64 *lpsizes;
+#endif
 
 	spin_lock_irqsave(&slice_convert_lock, flags);
 	if (address < SLICE_LOW_TOP) {
 		i = GET_LOW_SLICE_INDEX(address);
+#ifndef CONFIG_BIGMEM
+		p = &mm->context.low_slices_psize;
+#else
 		lpsizes = &mm->context.low_slices_psize;
 		*lpsizes = (*lpsizes & ~(0xful << (i * 4))) |
 			((unsigned long) psize << (i * 4));
+#endif
 	} else {
+#ifdef CONFIG_BIGMEM
 		int index, mask_index;
+#endif
 		i = GET_HIGH_SLICE_INDEX(address);
+#ifndef CONFIG_BIGMEM
+		p = &mm->context.high_slices_psize;
+#else
 		hpsizes = mm->context.high_slices_psize;
 		mask_index = i & 0x1;
 		index = i >> 1;
 		hpsizes[index] = (hpsizes[index] &
 				  ~(0xf << (mask_index * 4))) |
 			(((unsigned long)psize) << (mask_index * 4));
+#endif
 	}
+#ifndef CONFIG_BIGMEM
+	*p = (*p & ~(0xful << (i * 4))) | ((unsigned long) psize << (i * 4));
+#else
 
+#endif
 	spin_unlock_irqrestore(&slice_convert_lock, flags);
 
 #ifdef CONFIG_SPU_BASE
@@ -737,9 +1010,15 @@ void slice_set_psize(struct mm_struct *m
 void slice_set_range_psize(struct mm_struct *mm, unsigned long start,
 			   unsigned long len, unsigned int psize)
 {
+#ifndef CONFIG_BIGMEM
+	struct slice_mask mask = slice_range_to_mask(start, len);
+#else
 	struct slice_mask mask;
+#endif
 
+#ifdef CONFIG_BIGMEM
 	slice_range_to_mask(start, len, &mask);
+#endif
 	slice_convert(mm, mask, psize);
 }
 
@@ -768,14 +1047,24 @@ int is_hugepage_only_range(struct mm_str
 	struct slice_mask mask, available;
 	unsigned int psize = mm->context.user_psize;
 
+#ifndef CONFIG_BIGMEM
+	mask = slice_range_to_mask(addr, len);
+	available = slice_mask_for_size(mm, psize);
+#else
 	slice_range_to_mask(addr, len, &mask);
 	slice_mask_for_size(mm, psize, &available);
+#endif
 #ifdef CONFIG_PPC_64K_PAGES
 	/* We need to account for 4k slices too */
 	if (psize == MMU_PAGE_64K) {
 		struct slice_mask compat_mask;
+#ifndef CONFIG_BIGMEM
+		compat_mask = slice_mask_for_size(mm, MMU_PAGE_4K);
+		or_mask(available, compat_mask);
+#else
 		slice_mask_for_size(mm, MMU_PAGE_4K, &compat_mask);
 		slice_or_mask(&available, &compat_mask);
+#endif
 	}
 #endif
 
diff -urp a/arch/powerpc/mm/stab.c b/arch/powerpc/mm/stab.c
--- a/arch/powerpc/mm/stab.c
+++ b/arch/powerpc/mm/stab.c
@@ -0,0 +1,298 @@
+#ifndef CONFIG_BIGMEM
+/*
+ * PowerPC64 Segment Translation Support.
+ *
+ * Dave Engebretsen and Mike Corrigan {engebret|mikejc}@us.ibm.com
+ *    Copyright (c) 2001 Dave Engebretsen
+ *
+ * Copyright (C) 2002 Anton Blanchard <anton@au.ibm.com>, IBM
+ *
+ *      This program is free software; you can redistribute it and/or
+ *      modify it under the terms of the GNU General Public License
+ *      as published by the Free Software Foundation; either version
+ *      2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/memblock.h>
+
+#include <asm/pgtable.h>
+#include <asm/mmu.h>
+#include <asm/mmu_context.h>
+#include <asm/paca.h>
+#include <asm/cputable.h>
+#include <asm/prom.h>
+#include <asm/abs_addr.h>
+#include <asm/firmware.h>
+#include <asm/iseries/hv_call.h>
+
+struct stab_entry {
+	unsigned long esid_data;
+	unsigned long vsid_data;
+};
+
+#define NR_STAB_CACHE_ENTRIES 8
+static DEFINE_PER_CPU(long, stab_cache_ptr);
+static DEFINE_PER_CPU(long [NR_STAB_CACHE_ENTRIES], stab_cache);
+
+/*
+ * Create a segment table entry for the given esid/vsid pair.
+ */
+static int make_ste(unsigned long stab, unsigned long esid, unsigned long vsid)
+{
+	unsigned long esid_data, vsid_data;
+	unsigned long entry, group, old_esid, castout_entry, i;
+	unsigned int global_entry;
+	struct stab_entry *ste, *castout_ste;
+	unsigned long kernel_segment = (esid << SID_SHIFT) >= PAGE_OFFSET;
+
+	vsid_data = vsid << STE_VSID_SHIFT;
+	esid_data = esid << SID_SHIFT | STE_ESID_KP | STE_ESID_V;
+	if (! kernel_segment)
+		esid_data |= STE_ESID_KS;
+
+	/* Search the primary group first. */
+	global_entry = (esid & 0x1f) << 3;
+	ste = (struct stab_entry *)(stab | ((esid & 0x1f) << 7));
+
+	/* Find an empty entry, if one exists. */
+	for (group = 0; group < 2; group++) {
+		for (entry = 0; entry < 8; entry++, ste++) {
+			if (!(ste->esid_data & STE_ESID_V)) {
+				ste->vsid_data = vsid_data;
+				eieio();
+				ste->esid_data = esid_data;
+				return (global_entry | entry);
+			}
+		}
+		/* Now search the secondary group. */
+		global_entry = ((~esid) & 0x1f) << 3;
+		ste = (struct stab_entry *)(stab | (((~esid) & 0x1f) << 7));
+	}
+
+	/*
+	 * Could not find empty entry, pick one with a round robin selection.
+	 * Search all entries in the two groups.
+	 */
+	castout_entry = get_paca()->stab_rr;
+	for (i = 0; i < 16; i++) {
+		if (castout_entry < 8) {
+			global_entry = (esid & 0x1f) << 3;
+			ste = (struct stab_entry *)(stab | ((esid & 0x1f) << 7));
+			castout_ste = ste + castout_entry;
+		} else {
+			global_entry = ((~esid) & 0x1f) << 3;
+			ste = (struct stab_entry *)(stab | (((~esid) & 0x1f) << 7));
+			castout_ste = ste + (castout_entry - 8);
+		}
+
+		/* Dont cast out the first kernel segment */
+		if ((castout_ste->esid_data & ESID_MASK) != PAGE_OFFSET)
+			break;
+
+		castout_entry = (castout_entry + 1) & 0xf;
+	}
+
+	get_paca()->stab_rr = (castout_entry + 1) & 0xf;
+
+	/* Modify the old entry to the new value. */
+
+	/* Force previous translations to complete. DRENG */
+	asm volatile("isync" : : : "memory");
+
+	old_esid = castout_ste->esid_data >> SID_SHIFT;
+	castout_ste->esid_data = 0;		/* Invalidate old entry */
+
+	asm volatile("sync" : : : "memory");    /* Order update */
+
+	castout_ste->vsid_data = vsid_data;
+	eieio();				/* Order update */
+	castout_ste->esid_data = esid_data;
+
+	asm volatile("slbie  %0" : : "r" (old_esid << SID_SHIFT));
+	/* Ensure completion of slbie */
+	asm volatile("sync" : : : "memory");
+
+	return (global_entry | (castout_entry & 0x7));
+}
+
+/*
+ * Allocate a segment table entry for the given ea and mm
+ */
+static int __ste_allocate(unsigned long ea, struct mm_struct *mm)
+{
+	unsigned long vsid;
+	unsigned char stab_entry;
+	unsigned long offset;
+
+	/* Kernel or user address? */
+	if (is_kernel_addr(ea)) {
+		vsid = get_kernel_vsid(ea, MMU_SEGSIZE_256M);
+	} else {
+		if ((ea >= TASK_SIZE_USER64) || (! mm))
+			return 1;
+
+		vsid = get_vsid(mm->context.id, ea, MMU_SEGSIZE_256M);
+	}
+
+	stab_entry = make_ste(get_paca()->stab_addr, GET_ESID(ea), vsid);
+
+	if (!is_kernel_addr(ea)) {
+		offset = __get_cpu_var(stab_cache_ptr);
+		if (offset < NR_STAB_CACHE_ENTRIES)
+			__get_cpu_var(stab_cache[offset++]) = stab_entry;
+		else
+			offset = NR_STAB_CACHE_ENTRIES+1;
+		__get_cpu_var(stab_cache_ptr) = offset;
+
+		/* Order update */
+		asm volatile("sync":::"memory");
+	}
+
+	return 0;
+}
+
+int ste_allocate(unsigned long ea)
+{
+	return __ste_allocate(ea, current->mm);
+}
+
+/*
+ * Do the segment table work for a context switch: flush all user
+ * entries from the table, then preload some probably useful entries
+ * for the new task
+ */
+void switch_stab(struct task_struct *tsk, struct mm_struct *mm)
+{
+	struct stab_entry *stab = (struct stab_entry *) get_paca()->stab_addr;
+	struct stab_entry *ste;
+	unsigned long offset;
+	unsigned long pc = KSTK_EIP(tsk);
+	unsigned long stack = KSTK_ESP(tsk);
+	unsigned long unmapped_base;
+
+	/* Force previous translations to complete. DRENG */
+	asm volatile("isync" : : : "memory");
+
+	/*
+	 * We need interrupts hard-disabled here, not just soft-disabled,
+	 * so that a PMU interrupt can't occur, which might try to access
+	 * user memory (to get a stack trace) and possible cause an STAB miss
+	 * which would update the stab_cache/stab_cache_ptr per-cpu variables.
+	 */
+	hard_irq_disable();
+
+	offset = __get_cpu_var(stab_cache_ptr);
+	if (offset <= NR_STAB_CACHE_ENTRIES) {
+		int i;
+
+		for (i = 0; i < offset; i++) {
+			ste = stab + __get_cpu_var(stab_cache[i]);
+			ste->esid_data = 0; /* invalidate entry */
+		}
+	} else {
+		unsigned long entry;
+
+		/* Invalidate all entries. */
+		ste = stab;
+
+		/* Never flush the first entry. */
+		ste += 1;
+		for (entry = 1;
+		     entry < (HW_PAGE_SIZE / sizeof(struct stab_entry));
+		     entry++, ste++) {
+			unsigned long ea;
+			ea = ste->esid_data & ESID_MASK;
+			if (!is_kernel_addr(ea)) {
+				ste->esid_data = 0;
+			}
+		}
+	}
+
+	asm volatile("sync; slbia; sync":::"memory");
+
+	__get_cpu_var(stab_cache_ptr) = 0;
+
+	/* Now preload some entries for the new task */
+	if (test_tsk_thread_flag(tsk, TIF_32BIT))
+		unmapped_base = TASK_UNMAPPED_BASE_USER32;
+	else
+		unmapped_base = TASK_UNMAPPED_BASE_USER64;
+
+	__ste_allocate(pc, mm);
+
+	if (GET_ESID(pc) == GET_ESID(stack))
+		return;
+
+	__ste_allocate(stack, mm);
+
+	if ((GET_ESID(pc) == GET_ESID(unmapped_base))
+	    || (GET_ESID(stack) == GET_ESID(unmapped_base)))
+		return;
+
+	__ste_allocate(unmapped_base, mm);
+
+	/* Order update */
+	asm volatile("sync" : : : "memory");
+}
+
+/*
+ * Allocate segment tables for secondary CPUs.  These must all go in
+ * the first (bolted) segment, so that do_stab_bolted won't get a
+ * recursive segment miss on the segment table itself.
+ */
+void __init stabs_alloc(void)
+{
+	int cpu;
+
+	if (mmu_has_feature(MMU_FTR_SLB))
+		return;
+
+	for_each_possible_cpu(cpu) {
+		unsigned long newstab;
+
+		if (cpu == 0)
+			continue; /* stab for CPU 0 is statically allocated */
+
+		newstab = memblock_alloc_base(HW_PAGE_SIZE, HW_PAGE_SIZE,
+					 1<<SID_SHIFT);
+		newstab = (unsigned long)__va(newstab);
+
+		memset((void *)newstab, 0, HW_PAGE_SIZE);
+
+		paca[cpu].stab_addr = newstab;
+		paca[cpu].stab_real = virt_to_abs(newstab);
+		printk(KERN_INFO "Segment table for CPU %d at 0x%llx "
+		       "virtual, 0x%llx absolute\n",
+		       cpu, paca[cpu].stab_addr, paca[cpu].stab_real);
+	}
+}
+
+/*
+ * Build an entry for the base kernel segment and put it into
+ * the segment table or SLB.  All other segment table or SLB
+ * entries are faulted in.
+ */
+void stab_initialize(unsigned long stab)
+{
+	unsigned long vsid = get_kernel_vsid(PAGE_OFFSET, MMU_SEGSIZE_256M);
+	unsigned long stabreal;
+
+	asm volatile("isync; slbia; isync":::"memory");
+	make_ste(stab, GET_ESID(PAGE_OFFSET), vsid);
+
+	/* Order update */
+	asm volatile("sync":::"memory");
+
+	/* Set ASR */
+	stabreal = get_paca()->stab_real | 0x1ul;
+
+#ifdef CONFIG_PPC_ISERIES
+	if (firmware_has_feature(FW_FEATURE_ISERIES)) {
+		HvCall1(HvCallBaseSetASR, stabreal);
+		return;
+	}
+#endif /* CONFIG_PPC_ISERIES */
+
+	mtspr(SPRN_ASR, stabreal);
+}
+#endif
diff -urp a/arch/powerpc/mm/tlb_hash64.c b/arch/powerpc/mm/tlb_hash64.c
--- a/arch/powerpc/mm/tlb_hash64.c
+++ b/arch/powerpc/mm/tlb_hash64.c
@@ -42,9 +42,15 @@ DEFINE_PER_CPU(struct ppc64_tlb_batch, p
 void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
 		     pte_t *ptep, unsigned long pte, int huge)
 {
+#ifdef CONFIG_BIGMEM
 	unsigned long vpn;
+#endif
 	struct ppc64_tlb_batch *batch = &get_cpu_var(ppc64_tlb_batch);
+#ifndef CONFIG_BIGMEM
+	unsigned long vsid, vaddr;
+#else
 	unsigned long vsid;
+#endif
 	unsigned int psize;
 	int ssize;
 	real_pte_t rpte;
@@ -82,12 +88,19 @@ void hpte_need_flush(struct mm_struct *m
 	if (!is_kernel_addr(addr)) {
 		ssize = user_segment_size(addr);
 		vsid = get_vsid(mm->context.id, addr, ssize);
+#ifndef CONFIG_BIGMEM
+		WARN_ON(vsid == 0);
+#endif
 	} else {
 		vsid = get_kernel_vsid(addr, mmu_kernel_ssize);
 		ssize = mmu_kernel_ssize;
 	}
+#ifndef CONFIG_BIGMEM
+	vaddr = hpt_va(addr, vsid, ssize);
+#else
 	WARN_ON(vsid == 0);
 	vpn = hpt_vpn(addr, vsid, ssize);
+#endif
 	rpte = __real_pte(__pte(pte), ptep);
 
 	/*
@@ -97,7 +110,11 @@ void hpte_need_flush(struct mm_struct *m
 	 * and decide to use local invalidates instead...
 	 */
 	if (!batch->active) {
+#ifndef CONFIG_BIGMEM
+		flush_hash_page(vaddr, rpte, psize, ssize, 0);
+#else
 		flush_hash_page(vpn, rpte, psize, ssize, 0);
+#endif
 		put_cpu_var(ppc64_tlb_batch);
 		return;
 	}
@@ -123,7 +140,11 @@ void hpte_need_flush(struct mm_struct *m
 		batch->ssize = ssize;
 	}
 	batch->pte[i] = rpte;
+#ifndef CONFIG_BIGMEM
+	batch->vaddr[i] = vaddr;
+#else
 	batch->vpn[i] = vpn;
+#endif
 	batch->index = ++i;
 	if (i >= PPC64_TLB_BATCH_NR)
 		__flush_tlb_pending(batch);
@@ -147,7 +168,11 @@ void __flush_tlb_pending(struct ppc64_tl
 	if (cpumask_equal(mm_cpumask(batch->mm), tmp))
 		local = 1;
 	if (i == 1)
+#ifndef CONFIG_BIGMEM
+		flush_hash_page(batch->vaddr[0], batch->pte[0],
+#else
 		flush_hash_page(batch->vpn[0], batch->pte[0],
+#endif
 				batch->psize, batch->ssize, local);
 	else
 		flush_hash_range(i, local);
diff -urp a/arch/powerpc/platforms/cell/beat_htab.c b/arch/powerpc/platforms/cell/beat_htab.c
--- a/arch/powerpc/platforms/cell/beat_htab.c
+++ b/arch/powerpc/platforms/cell/beat_htab.c
@@ -88,7 +88,11 @@ static inline unsigned int beat_read_mas
 }
 
 static long beat_lpar_hpte_insert(unsigned long hpte_group,
+#ifndef CONFIG_BIGMEM
+				  unsigned long va, unsigned long pa,
+#else
 				  unsigned long vpn, unsigned long pa,
+#endif
 				  unsigned long rflags, unsigned long vflags,
 				  int psize, int ssize)
 {
@@ -104,7 +108,11 @@ static long beat_lpar_hpte_insert(unsign
 			"rflags=%lx, vflags=%lx, psize=%d)\n",
 		hpte_group, va, pa, rflags, vflags, psize);
 
+#ifndef CONFIG_BIGMEM
+	hpte_v = hpte_encode_v(va, psize, MMU_SEGSIZE_256M) |
+#else
 	hpte_v = hpte_encode_v(vpn, psize, MMU_SEGSIZE_256M) |
+#endif
 		vflags | HPTE_V_VALID;
 	hpte_r = hpte_encode_r(pa, psize) | rflags;
 
@@ -185,14 +193,22 @@ static void beat_lpar_hptab_clear(void)
  */
 static long beat_lpar_hpte_updatepp(unsigned long slot,
 				    unsigned long newpp,
+#ifndef CONFIG_BIGMEM
+				    unsigned long va,
+#else
 				    unsigned long vpn,
+#endif
 				    int psize, int ssize, int local)
 {
 	unsigned long lpar_rc;
 	u64 dummy0, dummy1;
 	unsigned long want_v;
 
+#ifndef CONFIG_BIGMEM
+	want_v = hpte_encode_v(va, psize, MMU_SEGSIZE_256M);
+#else
 	want_v = hpte_encode_v(vpn, psize, MMU_SEGSIZE_256M);
+#endif
 
 	DBG_LOW("    update: "
 		"avpnv=%016lx, slot=%016lx, psize: %d, newpp %016lx ... ",
@@ -221,15 +237,24 @@ static long beat_lpar_hpte_updatepp(unsi
 	return 0;
 }
 
+#ifndef CONFIG_BIGMEM
+static long beat_lpar_hpte_find(unsigned long va, int psize)
+#else
 static long beat_lpar_hpte_find(unsigned long vpn, int psize)
+#endif
 {
 	unsigned long hash;
 	unsigned long i, j;
 	long slot;
 	unsigned long want_v, hpte_v;
 
+#ifndef CONFIG_BIGMEM
+	hash = hpt_hash(va, mmu_psize_defs[psize].shift, MMU_SEGSIZE_256M);
+	want_v = hpte_encode_v(va, psize, MMU_SEGSIZE_256M);
+#else
 	hash = hpt_hash(vpn, mmu_psize_defs[psize].shift, MMU_SEGSIZE_256M);
 	want_v = hpte_encode_v(vpn, psize, MMU_SEGSIZE_256M);
+#endif
 
 	for (j = 0; j < 2; j++) {
 		slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
@@ -256,15 +281,27 @@ static void beat_lpar_hpte_updateboltedp
 					  unsigned long ea,
 					  int psize, int ssize)
 {
+#ifndef CONFIG_BIGMEM
+	unsigned long lpar_rc, slot, vsid, va;
+#else
 	unsigned long vpn;
 	unsigned long lpar_rc, slot, vsid;
+#endif
 	u64 dummy0, dummy1;
 
 	vsid = get_kernel_vsid(ea, MMU_SEGSIZE_256M);
+#ifndef CONFIG_BIGMEM
+	va = (vsid << 28) | (ea & 0x0fffffff);
+#else
 	vpn = hpt_vpn(ea, vsid, MMU_SEGSIZE_256M);
+#endif
 
 	raw_spin_lock(&beat_htab_lock);
+#ifndef CONFIG_BIGMEM
+	slot = beat_lpar_hpte_find(va, psize);
+#else
 	slot = beat_lpar_hpte_find(vpn, psize);
+#endif
 	BUG_ON(slot == -1);
 
 	lpar_rc = beat_write_htab_entry(0, slot, 0, newpp, 0, 7,
@@ -274,7 +311,11 @@ static void beat_lpar_hpte_updateboltedp
 	BUG_ON(lpar_rc != 0);
 }
 
+#ifndef CONFIG_BIGMEM
+static void beat_lpar_hpte_invalidate(unsigned long slot, unsigned long va,
+#else
 static void beat_lpar_hpte_invalidate(unsigned long slot, unsigned long vpn,
+#endif
 					 int psize, int ssize, int local)
 {
 	unsigned long want_v;
@@ -284,7 +325,11 @@ static void beat_lpar_hpte_invalidate(un
 
 	DBG_LOW("    inval : slot=%lx, va=%016lx, psize: %d, local: %d\n",
 		slot, va, psize, local);
+#ifndef CONFIG_BIGMEM
+	want_v = hpte_encode_v(va, psize, MMU_SEGSIZE_256M);
+#else
 	want_v = hpte_encode_v(vpn, psize, MMU_SEGSIZE_256M);
+#endif
 
 	raw_spin_lock_irqsave(&beat_htab_lock, flags);
 	dummy1 = beat_lpar_hpte_getword0(slot);
@@ -313,7 +358,11 @@ void __init hpte_init_beat(void)
 }
 
 static long beat_lpar_hpte_insert_v3(unsigned long hpte_group,
+#ifndef CONFIG_BIGMEM
+				  unsigned long va, unsigned long pa,
+#else
 				  unsigned long vpn, unsigned long pa,
+#endif
 				  unsigned long rflags, unsigned long vflags,
 				  int psize, int ssize)
 {
@@ -325,11 +374,23 @@ static long beat_lpar_hpte_insert_v3(uns
 		return -1;
 
 	if (!(vflags & HPTE_V_BOLTED))
+#ifndef CONFIG_BIGMEM
+		DBG_LOW("hpte_insert(group=%lx, va=%016lx, pa=%016lx, "
+#else
 		DBG_LOW("hpte_insert(group=%lx, vpn=%016lx, pa=%016lx, "
+#endif
 			"rflags=%lx, vflags=%lx, psize=%d)\n",
+#ifndef CONFIG_BIGMEM
+		hpte_group, va, pa, rflags, vflags, psize);
+#else
 		hpte_group, vpn, pa, rflags, vflags, psize);
+#endif
 
+#ifndef CONFIG_BIGMEM
+	hpte_v = hpte_encode_v(va, psize, MMU_SEGSIZE_256M) |
+#else
 	hpte_v = hpte_encode_v(vpn, psize, MMU_SEGSIZE_256M) |
+#endif
 		vflags | HPTE_V_VALID;
 	hpte_r = hpte_encode_r(pa, psize) | rflags;
 
@@ -367,14 +428,22 @@ static long beat_lpar_hpte_insert_v3(uns
  */
 static long beat_lpar_hpte_updatepp_v3(unsigned long slot,
 				    unsigned long newpp,
+#ifndef CONFIG_BIGMEM
+				    unsigned long va,
+#else
 				    unsigned long vpn,
+#endif
 				    int psize, int ssize, int local)
 {
 	unsigned long lpar_rc;
 	unsigned long want_v;
 	unsigned long pss;
 
+#ifndef CONFIG_BIGMEM
+	want_v = hpte_encode_v(va, psize, MMU_SEGSIZE_256M);
+#else
 	want_v = hpte_encode_v(vpn, psize, MMU_SEGSIZE_256M);
+#endif
 	pss = (psize == MMU_PAGE_4K) ? -1UL : mmu_psize_defs[psize].penc;
 
 	DBG_LOW("    update: "
@@ -395,16 +464,26 @@ static long beat_lpar_hpte_updatepp_v3(u
 	return 0;
 }
 
+#ifndef CONFIG_BIGMEM
+static void beat_lpar_hpte_invalidate_v3(unsigned long slot, unsigned long va,
+#else
 static void beat_lpar_hpte_invalidate_v3(unsigned long slot, unsigned long vpn,
+#endif
 					 int psize, int ssize, int local)
 {
 	unsigned long want_v;
 	unsigned long lpar_rc;
 	unsigned long pss;
 
+#ifndef CONFIG_BIGMEM
+	DBG_LOW("    inval : slot=%lx, va=%016lx, psize: %d, local: %d\n",
+		slot, va, psize, local);
+	want_v = hpte_encode_v(va, psize, MMU_SEGSIZE_256M);
+#else
 	DBG_LOW("    inval : slot=%lx, vpn=%016lx, psize: %d, local: %d\n",
 		slot, vpn, psize, local);
 	want_v = hpte_encode_v(vpn, psize, MMU_SEGSIZE_256M);
+#endif
 	pss = (psize == MMU_PAGE_4K) ? -1UL : mmu_psize_defs[psize].penc;
 
 	lpar_rc = beat_invalidate_htab_entry3(0, slot, want_v, pss);
Only in a/arch/powerpc/platforms/cell: beat_htab.c.orig
diff -urp a/arch/powerpc/platforms/ps3/htab.c b/arch/powerpc/platforms/ps3/htab.c
--- a/arch/powerpc/platforms/ps3/htab.c
+++ b/arch/powerpc/platforms/ps3/htab.c
@@ -43,7 +43,11 @@ enum ps3_lpar_vas_id {
 
 static DEFINE_SPINLOCK(ps3_htab_lock);
 
+#ifndef CONFIG_BIGMEM
+static long ps3_hpte_insert(unsigned long hpte_group, unsigned long va,
+#else
 static long ps3_hpte_insert(unsigned long hpte_group, unsigned long vpn,
+#endif
 	unsigned long pa, unsigned long rflags, unsigned long vflags,
 	int psize, int ssize)
 {
@@ -61,7 +65,11 @@ static long ps3_hpte_insert(unsigned lon
 	 */
 	vflags &= ~HPTE_V_SECONDARY;
 
+#ifndef CONFIG_BIGMEM
+	hpte_v = hpte_encode_v(va, psize, ssize) | vflags | HPTE_V_VALID;
+#else
 	hpte_v = hpte_encode_v(vpn, psize, ssize) | vflags | HPTE_V_VALID;
+#endif
 	hpte_r = hpte_encode_r(ps3_mm_phys_to_lpar(pa), psize) | rflags;
 
 	spin_lock_irqsave(&ps3_htab_lock, flags);
@@ -75,8 +83,13 @@ static long ps3_hpte_insert(unsigned lon
 
 	if (result) {
 		/* all entries bolted !*/
+#ifndef CONFIG_BIGMEM
+		pr_info("%s:result=%d va=%lx pa=%lx ix=%lx v=%llx r=%llx\n",
+			__func__, result, va, pa, hpte_group, hpte_v, hpte_r);
+#else
 		pr_info("%s:result=%d vpn=%lx pa=%lx ix=%lx v=%llx r=%llx\n",
 			__func__, result, vpn, pa, hpte_group, hpte_v, hpte_r);
+#endif
 		BUG();
 	}
 
@@ -107,7 +120,11 @@ static long ps3_hpte_remove(unsigned lon
 }
 
 static long ps3_hpte_updatepp(unsigned long slot, unsigned long newpp,
+#ifndef CONFIG_BIGMEM
+	unsigned long va, int psize, int ssize, int local)
+#else
 	unsigned long vpn, int psize, int ssize, int local)
+#endif
 {
 	int result;
 	u64 hpte_v, want_v, hpte_rs;
@@ -115,7 +132,11 @@ static long ps3_hpte_updatepp(unsigned l
 	unsigned long flags;
 	long ret;
 
+#ifndef CONFIG_BIGMEM
+	want_v = hpte_encode_v(va, psize, ssize);
+#else
 	want_v = hpte_encode_v(vpn, psize, ssize);
+#endif
 
 	spin_lock_irqsave(&ps3_htab_lock, flags);
 
@@ -125,8 +146,13 @@ static long ps3_hpte_updatepp(unsigned l
 				       &hpte_rs);
 
 	if (result) {
+#ifndef CONFIG_BIGMEM
+		pr_info("%s: res=%d read va=%lx slot=%lx psize=%d\n",
+			__func__, result, va, slot, psize);
+#else
 		pr_info("%s: res=%d read vpn=%lx slot=%lx psize=%d\n",
 			__func__, result, vpn, slot, psize);
+#endif
 		BUG();
 	}
 
@@ -159,7 +185,11 @@ static void ps3_hpte_updateboltedpp(unsi
 	panic("ps3_hpte_updateboltedpp() not implemented");
 }
 
+#ifndef CONFIG_BIGMEM
+static void ps3_hpte_invalidate(unsigned long slot, unsigned long va,
+#else
 static void ps3_hpte_invalidate(unsigned long slot, unsigned long vpn,
+#endif
 	int psize, int ssize, int local)
 {
 	unsigned long flags;
@@ -170,8 +200,13 @@ static void ps3_hpte_invalidate(unsigned
 	result = lv1_write_htab_entry(PS3_LPAR_VAS_ID_CURRENT, slot, 0, 0);
 
 	if (result) {
+#ifndef CONFIG_BIGMEM
+		pr_info("%s: res=%d va=%lx slot=%lx psize=%d\n",
+			__func__, result, va, slot, psize);
+#else
 		pr_info("%s: res=%d vpn=%lx slot=%lx psize=%d\n",
 			__func__, result, vpn, slot, psize);
+#endif
 		BUG();
 	}
 
diff -urp a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
--- a/arch/powerpc/platforms/pseries/lpar.c
+++ b/arch/powerpc/platforms/pseries/lpar.c
@@ -298,9 +298,15 @@ void vpa_init(int cpu)
 }
 
 static long pSeries_lpar_hpte_insert(unsigned long hpte_group,
+#ifndef CONFIG_BIGMEM
+			      unsigned long va, unsigned long pa,
+			      unsigned long rflags, unsigned long vflags,
+			      int psize, int ssize)
+#else
 				     unsigned long vpn, unsigned long pa,
 				     unsigned long rflags, unsigned long vflags,
 				     int psize, int ssize)
+#endif
 {
 	unsigned long lpar_rc;
 	unsigned long flags;
@@ -308,11 +314,21 @@ static long pSeries_lpar_hpte_insert(uns
 	unsigned long hpte_v, hpte_r;
 
 	if (!(vflags & HPTE_V_BOLTED))
+#ifndef CONFIG_BIGMEM
+		pr_devel("hpte_insert(group=%lx, va=%016lx, pa=%016lx, "
+			 "rflags=%lx, vflags=%lx, psize=%d)\n",
+			 hpte_group, va, pa, rflags, vflags, psize);
+#else
 		pr_devel("hpte_insert(group=%lx, vpn=%016lx, "
 			 "pa=%016lx, rflags=%lx, vflags=%lx, psize=%d)\n",
 			 hpte_group, vpn,  pa, rflags, vflags, psize);
+#endif
 
+#ifndef CONFIG_BIGMEM
+	hpte_v = hpte_encode_v(va, psize, ssize) | vflags | HPTE_V_VALID;
+#else
 	hpte_v = hpte_encode_v(vpn, psize, ssize) | vflags | HPTE_V_VALID;
+#endif
 	hpte_r = hpte_encode_r(pa, psize) | rflags;
 
 	if (!(vflags & HPTE_V_BOLTED))
@@ -434,7 +450,26 @@ static void pSeries_lpar_hptab_clear(voi
 		__pSeries_lpar_clear_hpt();
 }
 
+#ifndef CONFIG_BIGMEM
+/*
+ * This computes the AVPN and B fields of the first dword of a HPTE,
+ * for use when we want to match an existing PTE.  The bottom 7 bits
+ * of the returned value are zero.
+ */
+static inline unsigned long hpte_encode_avpn(unsigned long va, int psize,
+					     int ssize)
+{
+	unsigned long v;
+
+	v = (va >> 23) & ~(mmu_psize_defs[psize].avpnm);
+	v <<= HPTE_V_AVPN_SHIFT;
+	v |= ((unsigned long) ssize) << HPTE_V_SSIZE_SHIFT;
+	return v;
+}
+
+#else
  /* . */
+#endif
 /*
  * NOTE: for updatepp ops we are fortunate that the linux "newpp" bits and
  * the low 3 bits of flags happen to line up.  So no transform is needed.
@@ -443,14 +478,22 @@ static void pSeries_lpar_hptab_clear(voi
  */
 static long pSeries_lpar_hpte_updatepp(unsigned long slot,
 				       unsigned long newpp,
+#ifndef CONFIG_BIGMEM
+				       unsigned long va,
+#else
 				       unsigned long vpn,
+#endif
 				       int psize, int ssize, int local)
 {
 	unsigned long lpar_rc;
 	unsigned long flags = (newpp & 7) | H_AVPN;
 	unsigned long want_v;
 
+#ifndef CONFIG_BIGMEM
+	want_v = hpte_encode_avpn(va, psize, ssize);
+#else
 	want_v = hpte_encode_avpn(vpn, psize, ssize);
+#endif
 
 	pr_devel("    update: avpnv=%016lx, hash=%016lx, f=%lx, psize: %d ...",
 		 want_v, slot, flags, psize);
@@ -488,15 +531,24 @@ static unsigned long pSeries_lpar_hpte_g
 	return dword0;
 }
 
+#ifndef CONFIG_BIGMEM
+static long pSeries_lpar_hpte_find(unsigned long va, int psize, int ssize)
+#else
 static long pSeries_lpar_hpte_find(unsigned long vpn, int psize, int ssize)
+#endif
 {
 	unsigned long hash;
 	unsigned long i;
 	long slot;
 	unsigned long want_v, hpte_v;
 
+#ifndef CONFIG_BIGMEM
+	hash = hpt_hash(va, mmu_psize_defs[psize].shift, ssize);
+	want_v = hpte_encode_avpn(va, psize, ssize);
+#else
 	hash = hpt_hash(vpn, mmu_psize_defs[psize].shift, ssize);
 	want_v = hpte_encode_avpn(vpn, psize, ssize);
+#endif
 
 	/* Bolted entries are always in the primary group */
 	slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
@@ -516,13 +568,25 @@ static void pSeries_lpar_hpte_updatebolt
 					     unsigned long ea,
 					     int psize, int ssize)
 {
+#ifndef CONFIG_BIGMEM
+	unsigned long lpar_rc, slot, vsid, va, flags;
+#else
 	unsigned long vpn;
 	unsigned long lpar_rc, slot, vsid, flags;
+#endif
 
 	vsid = get_kernel_vsid(ea, ssize);
+#ifndef CONFIG_BIGMEM
+	va = hpt_va(ea, vsid, ssize);
+#else
 	vpn = hpt_vpn(ea, vsid, ssize);
+#endif
 
+#ifndef CONFIG_BIGMEM
+	slot = pSeries_lpar_hpte_find(va, psize, ssize);
+#else
 	slot = pSeries_lpar_hpte_find(vpn, psize, ssize);
+#endif
 	BUG_ON(slot == -1);
 
 	flags = newpp & 7;
@@ -531,17 +595,30 @@ static void pSeries_lpar_hpte_updatebolt
 	BUG_ON(lpar_rc != H_SUCCESS);
 }
 
+#ifndef CONFIG_BIGMEM
+static void pSeries_lpar_hpte_invalidate(unsigned long slot, unsigned long va,
+#else
 static void pSeries_lpar_hpte_invalidate(unsigned long slot, unsigned long vpn,
+#endif
 					 int psize, int ssize, int local)
 {
 	unsigned long want_v;
 	unsigned long lpar_rc;
 	unsigned long dummy1, dummy2;
 
+#ifndef CONFIG_BIGMEM
+	pr_devel("    inval : slot=%lx, va=%016lx, psize: %d, local: %d\n",
+		 slot, va, psize, local);
+#else
 	pr_devel("    inval : slot=%lx, vpn=%016lx, psize: %d, local: %d\n",
 		 slot, vpn, psize, local);
+#endif
 
+#ifndef CONFIG_BIGMEM
+	want_v = hpte_encode_avpn(va, psize, ssize);
+#else
 	want_v = hpte_encode_avpn(vpn, psize, ssize);
+#endif
 	lpar_rc = plpar_pte_remove(H_AVPN, slot, want_v, &dummy1, &dummy2);
 	if (lpar_rc == H_NOT_FOUND)
 		return;
@@ -552,16 +629,32 @@ static void pSeries_lpar_hpte_invalidate
 static void pSeries_lpar_hpte_removebolted(unsigned long ea,
 					   int psize, int ssize)
 {
+#ifndef CONFIG_BIGMEM
+	unsigned long slot, vsid, va;
+#else
 	unsigned long vpn;
 	unsigned long slot, vsid;
+#endif
 
 	vsid = get_kernel_vsid(ea, ssize);
+#ifndef CONFIG_BIGMEM
+	va = hpt_va(ea, vsid, ssize);
+#else
 	vpn = hpt_vpn(ea, vsid, ssize);
+#endif
 
+#ifndef CONFIG_BIGMEM
+	slot = pSeries_lpar_hpte_find(va, psize, ssize);
+#else
 	slot = pSeries_lpar_hpte_find(vpn, psize, ssize);
+#endif
 	BUG_ON(slot == -1);
 
+#ifndef CONFIG_BIGMEM
+	pSeries_lpar_hpte_invalidate(slot, va, psize, ssize, 0);
+#else
 	pSeries_lpar_hpte_invalidate(slot, vpn, psize, ssize, 0);
+#endif
 }
 
 /* Flag bits for H_BULK_REMOVE */
@@ -577,12 +670,17 @@ static void pSeries_lpar_hpte_removebolt
  */
 static void pSeries_lpar_flush_hash_range(unsigned long number, int local)
 {
+#ifdef CONFIG_BIGMEM
 	unsigned long vpn;
+#endif
 	unsigned long i, pix, rc;
 	unsigned long flags = 0;
 	struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
 	int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
 	unsigned long param[9];
+#ifndef CONFIG_BIGMEM
+	unsigned long va;
+#endif
 	unsigned long hash, index, shift, hidx, slot;
 	real_pte_t pte;
 	int psize, ssize;
@@ -594,21 +692,38 @@ static void pSeries_lpar_flush_hash_rang
 	ssize = batch->ssize;
 	pix = 0;
 	for (i = 0; i < number; i++) {
+#ifndef CONFIG_BIGMEM
+		va = batch->vaddr[i];
+#else
 		vpn = batch->vpn[i];
+#endif
 		pte = batch->pte[i];
+#ifndef CONFIG_BIGMEM
+		pte_iterate_hashed_subpages(pte, psize, va, index, shift) {
+			hash = hpt_hash(va, shift, ssize);
+#else
 		pte_iterate_hashed_subpages(pte, psize, vpn, index, shift) {
 			hash = hpt_hash(vpn, shift, ssize);
+#endif
 			hidx = __rpte_to_hidx(pte, index);
 			if (hidx & _PTEIDX_SECONDARY)
 				hash = ~hash;
 			slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
 			slot += hidx & _PTEIDX_GROUP_IX;
 			if (!firmware_has_feature(FW_FEATURE_BULK_REMOVE)) {
+#ifndef CONFIG_BIGMEM
+				pSeries_lpar_hpte_invalidate(slot, va, psize,
+#else
 				pSeries_lpar_hpte_invalidate(slot, vpn, psize,
+#endif
 							     ssize, local);
 			} else {
 				param[pix] = HBR_REQUEST | HBR_AVPN | slot;
+#ifndef CONFIG_BIGMEM
+				param[pix+1] = hpte_encode_avpn(va, psize,
+#else
 				param[pix+1] = hpte_encode_avpn(vpn, psize,
+#endif
 								ssize);
 				pix += 2;
 				if (pix == 8) {
Only in a/arch/powerpc/platforms/pseries: lpar.c.orig
diff -urp a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
--- a/arch/powerpc/xmon/xmon.c
+++ b/arch/powerpc/xmon/xmon.c
@@ -2702,10 +2702,36 @@ static void dump_slb(void)
 	}
 }
 
+#ifndef CONFIG_BIGMEM
+static void dump_stab(void)
+{
+	int i;
+	unsigned long *tmp = (unsigned long *)get_paca()->stab_addr;
+
+	printf("Segment table contents of cpu %x\n", smp_processor_id());
+
+	for (i = 0; i < PAGE_SIZE/16; i++) {
+		unsigned long a, b;
+
+		a = *tmp++;
+		b = *tmp++;
+
+		if (a || b) {
+			printf("%03d %016lx ", i, a);
+			printf("%016lx\n", b);
+		}
+	}
+}
+
+#endif
 void dump_segments(void)
 {
 	if (mmu_has_feature(MMU_FTR_SLB))
 		dump_slb();
+#ifndef CONFIG_BIGMEM
+	else
+		dump_stab();
+#endif
 }
 #endif
 
