cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux
Log | Files | Refs | README | LICENSE | sfeed.txt

hash_low.S (17766B)


      1/* SPDX-License-Identifier: GPL-2.0-or-later */
      2/*
      3 *  PowerPC version
      4 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
      5 *  Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
      6 *    Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
      7 *  Adapted for Power Macintosh by Paul Mackerras.
      8 *  Low-level exception handlers and MMU support
      9 *  rewritten by Paul Mackerras.
     10 *    Copyright (C) 1996 Paul Mackerras.
     11 *
     12 *  This file contains low-level assembler routines for managing
     13 *  the PowerPC MMU hash table.  (PPC 8xx processors don't use a
     14 *  hash table, so this file is not used on them.)
     15 */
     16
     17#include <linux/pgtable.h>
     18#include <linux/init.h>
     19#include <asm/reg.h>
     20#include <asm/page.h>
     21#include <asm/cputable.h>
     22#include <asm/ppc_asm.h>
     23#include <asm/thread_info.h>
     24#include <asm/asm-offsets.h>
     25#include <asm/export.h>
     26#include <asm/feature-fixups.h>
     27#include <asm/code-patching-asm.h>
     28
     29#ifdef CONFIG_PTE_64BIT
     30#define PTE_T_SIZE		8
     31#define PTE_FLAGS_OFFSET	4	/* offset of PTE flags, in bytes */
     32#else
     33#define PTE_T_SIZE		4
     34#define PTE_FLAGS_OFFSET	0
     35#endif
     36
     37/*
     38 * Load a PTE into the hash table, if possible.
     39 * The address is in r4, and r3 contains an access flag:
     40 * _PAGE_RW (0x400) if a write.
     41 * r9 contains the SRR1 value, from which we use the MSR_PR bit.
     42 * SPRG_THREAD contains the physical address of the current task's thread.
     43 *
     44 * Returns to the caller if the access is illegal or there is no
     45 * mapping for the address.  Otherwise it places an appropriate PTE
     46 * in the hash table and returns from the exception.
     47 * Uses r0, r3 - r6, r8, r10, ctr, lr.
     48 */
     49	.text
     50_GLOBAL(hash_page)
     51#ifdef CONFIG_SMP
     52	lis	r8, (mmu_hash_lock - PAGE_OFFSET)@h
     53	ori	r8, r8, (mmu_hash_lock - PAGE_OFFSET)@l
     54	lis	r0,0x0fff
     55	b	10f
     5611:	lwz	r6,0(r8)
     57	cmpwi	0,r6,0
     58	bne	11b
     5910:	lwarx	r6,0,r8
     60	cmpwi	0,r6,0
     61	bne-	11b
     62	stwcx.	r0,0,r8
     63	bne-	10b
     64	isync
     65#endif
     66	/* Get PTE (linux-style) and check access */
     67	lis	r0, TASK_SIZE@h		/* check if kernel address */
     68	cmplw	0,r4,r0
     69	mfspr	r8,SPRN_SPRG_THREAD	/* current task's THREAD (phys) */
     70	ori	r3,r3,_PAGE_USER|_PAGE_PRESENT /* test low addresses as user */
     71	lwz	r5,PGDIR(r8)		/* virt page-table root */
     72	blt+	112f			/* assume user more likely */
     73	lis	r5,swapper_pg_dir@ha	/* if kernel address, use */
     74	addi	r5,r5,swapper_pg_dir@l	/* kernel page table */
     75	rlwimi	r3,r9,32-12,29,29	/* MSR_PR -> _PAGE_USER */
     76112:	tophys(r5, r5)
     77#ifndef CONFIG_PTE_64BIT
     78	rlwimi	r5,r4,12,20,29		/* insert top 10 bits of address */
     79	lwz	r8,0(r5)		/* get pmd entry */
     80	rlwinm.	r8,r8,0,0,19		/* extract address of pte page */
     81#else
     82	rlwinm	r8,r4,13,19,29		/* Compute pgdir/pmd offset */
     83	lwzx	r8,r8,r5		/* Get L1 entry */
     84	rlwinm.	r8,r8,0,0,20		/* extract pt base address */
     85#endif
     86#ifdef CONFIG_SMP
     87	beq-	.Lhash_page_out		/* return if no mapping */
     88#else
     89	/* XXX it seems like the 601 will give a machine fault on the
     90	   rfi if its alignment is wrong (bottom 4 bits of address are
     91	   8 or 0xc) and we have had a not-taken conditional branch
     92	   to the address following the rfi. */
     93	beqlr-
     94#endif
     95#ifndef CONFIG_PTE_64BIT
     96	rlwimi	r8,r4,22,20,29		/* insert next 10 bits of address */
     97#else
     98	rlwimi	r8,r4,23,20,28		/* compute pte address */
     99	/*
    100	 * If PTE_64BIT is set, the low word is the flags word; use that
    101	 * word for locking since it contains all the interesting bits.
    102	 */
    103	addi	r8,r8,PTE_FLAGS_OFFSET
    104#endif
    105
    106	/*
    107	 * Update the linux PTE atomically.  We do the lwarx up-front
    108	 * because almost always, there won't be a permission violation
    109	 * and there won't already be an HPTE, and thus we will have
    110	 * to update the PTE to set _PAGE_HASHPTE.  -- paulus.
    111	 */
    112.Lretry:
    113	lwarx	r6,0,r8			/* get linux-style pte, flag word */
    114#ifdef CONFIG_PPC_KUAP
    115	mfsrin	r5,r4
    116	rlwinm	r0,r9,28,_PAGE_RW	/* MSR[PR] => _PAGE_RW */
    117	rlwinm	r5,r5,12,_PAGE_RW	/* Ks => _PAGE_RW */
    118	andc	r5,r5,r0		/* Ks & ~MSR[PR] */
    119	andc	r5,r6,r5		/* Clear _PAGE_RW when Ks = 1 && MSR[PR] = 0 */
    120	andc.	r5,r3,r5		/* check access & ~permission */
    121#else
    122	andc.	r5,r3,r6		/* check access & ~permission */
    123#endif
    124	rlwinm	r0,r3,32-3,24,24	/* _PAGE_RW access -> _PAGE_DIRTY */
    125	ori	r0,r0,_PAGE_ACCESSED|_PAGE_HASHPTE
    126#ifdef CONFIG_SMP
    127	bne-	.Lhash_page_out		/* return if access not permitted */
    128#else
    129	bnelr-
    130#endif
    131	or	r5,r0,r6		/* set accessed/dirty bits */
    132#ifdef CONFIG_PTE_64BIT
    133#ifdef CONFIG_SMP
    134	subf	r10,r6,r8		/* create false data dependency */
    135	subi	r10,r10,PTE_FLAGS_OFFSET
    136	lwzx	r10,r6,r10		/* Get upper PTE word */
    137#else
    138	lwz	r10,-PTE_FLAGS_OFFSET(r8)
    139#endif /* CONFIG_SMP */
    140#endif /* CONFIG_PTE_64BIT */
    141	stwcx.	r5,0,r8			/* attempt to update PTE */
    142	bne-	.Lretry			/* retry if someone got there first */
    143
    144	mfsrin	r3,r4			/* get segment reg for segment */
    145	bl	create_hpte		/* add the hash table entry */
    146
    147#ifdef CONFIG_SMP
    148	eieio
    149	lis	r8, (mmu_hash_lock - PAGE_OFFSET)@ha
    150	li	r0,0
    151	stw	r0, (mmu_hash_lock - PAGE_OFFSET)@l(r8)
    152#endif
    153	b	fast_hash_page_return
    154
    155#ifdef CONFIG_SMP
    156.Lhash_page_out:
    157	eieio
    158	lis	r8, (mmu_hash_lock - PAGE_OFFSET)@ha
    159	li	r0,0
    160	stw	r0, (mmu_hash_lock - PAGE_OFFSET)@l(r8)
    161	blr
    162#endif /* CONFIG_SMP */
    163_ASM_NOKPROBE_SYMBOL(hash_page)
    164
    165/*
    166 * Add an entry for a particular page to the hash table.
    167 *
    168 * add_hash_page(unsigned context, unsigned long va, unsigned long pmdval)
    169 *
    170 * We assume any necessary modifications to the pte (e.g. setting
    171 * the accessed bit) have already been done and that there is actually
    172 * a hash table in use (i.e. we're not on a 603).
    173 */
    174_GLOBAL(add_hash_page)
    175	mflr	r0
    176	stw	r0,4(r1)
    177
    178#ifdef CONFIG_SMP
    179	lwz	r8,TASK_CPU(r2)		/* to go in mmu_hash_lock */
    180	oris	r8,r8,12
    181#endif /* CONFIG_SMP */
    182
    183	/*
    184	 * We disable interrupts here, even on UP, because we don't
    185	 * want to race with hash_page, and because we want the
    186	 * _PAGE_HASHPTE bit to be a reliable indication of whether
    187	 * the HPTE exists (or at least whether one did once).
    188	 * We also turn off the MMU for data accesses so that we
    189	 * we can't take a hash table miss (assuming the code is
    190	 * covered by a BAT).  -- paulus
    191	 */
    192	mfmsr	r9
    193	rlwinm	r0,r9,0,17,15		/* clear bit 16 (MSR_EE) */
    194	rlwinm	r0,r0,0,28,26		/* clear MSR_DR */
    195	mtmsr	r0
    196	isync
    197
    198#ifdef CONFIG_SMP
    199	lis	r6, (mmu_hash_lock - PAGE_OFFSET)@ha
    200	addi	r6, r6, (mmu_hash_lock - PAGE_OFFSET)@l
    20110:	lwarx	r0,0,r6			/* take the mmu_hash_lock */
    202	cmpi	0,r0,0
    203	bne-	11f
    204	stwcx.	r8,0,r6
    205	beq+	12f
    20611:	lwz	r0,0(r6)
    207	cmpi	0,r0,0
    208	beq	10b
    209	b	11b
    21012:	isync
    211#endif
    212
    213	/*
    214	 * Fetch the linux pte and test and set _PAGE_HASHPTE atomically.
    215	 * If _PAGE_HASHPTE was already set, we don't replace the existing
    216	 * HPTE, so we just unlock and return.
    217	 */
    218	mr	r8,r5
    219#ifndef CONFIG_PTE_64BIT
    220	rlwimi	r8,r4,22,20,29
    221#else
    222	rlwimi	r8,r4,23,20,28
    223	addi	r8,r8,PTE_FLAGS_OFFSET
    224#endif
    2251:	lwarx	r6,0,r8
    226	andi.	r0,r6,_PAGE_HASHPTE
    227	bne	9f			/* if HASHPTE already set, done */
    228#ifdef CONFIG_PTE_64BIT
    229#ifdef CONFIG_SMP
    230	subf	r10,r6,r8		/* create false data dependency */
    231	subi	r10,r10,PTE_FLAGS_OFFSET
    232	lwzx	r10,r6,r10		/* Get upper PTE word */
    233#else
    234	lwz	r10,-PTE_FLAGS_OFFSET(r8)
    235#endif /* CONFIG_SMP */
    236#endif /* CONFIG_PTE_64BIT */
    237	ori	r5,r6,_PAGE_HASHPTE
    238	stwcx.	r5,0,r8
    239	bne-	1b
    240
    241	/* Convert context and va to VSID */
    242	mulli	r3,r3,897*16		/* multiply context by context skew */
    243	rlwinm	r0,r4,4,28,31		/* get ESID (top 4 bits of va) */
    244	mulli	r0,r0,0x111		/* multiply by ESID skew */
    245	add	r3,r3,r0		/* note create_hpte trims to 24 bits */
    246
    247	bl	create_hpte
    248
    2499:
    250#ifdef CONFIG_SMP
    251	lis	r6, (mmu_hash_lock - PAGE_OFFSET)@ha
    252	addi	r6, r6, (mmu_hash_lock - PAGE_OFFSET)@l
    253	eieio
    254	li	r0,0
    255	stw	r0,0(r6)		/* clear mmu_hash_lock */
    256#endif
    257
    258	/* reenable interrupts and DR */
    259	mtmsr	r9
    260	isync
    261
    262	lwz	r0,4(r1)
    263	mtlr	r0
    264	blr
    265_ASM_NOKPROBE_SYMBOL(add_hash_page)
    266
    267/*
    268 * This routine adds a hardware PTE to the hash table.
    269 * It is designed to be called with the MMU either on or off.
    270 * r3 contains the VSID, r4 contains the virtual address,
    271 * r5 contains the linux PTE, r6 contains the old value of the
    272 * linux PTE (before setting _PAGE_HASHPTE). r10 contains the
    273 * upper half of the PTE if CONFIG_PTE_64BIT.
    274 * On SMP, the caller should have the mmu_hash_lock held.
    275 * We assume that the caller has (or will) set the _PAGE_HASHPTE
    276 * bit in the linux PTE in memory.  The value passed in r6 should
    277 * be the old linux PTE value; if it doesn't have _PAGE_HASHPTE set
    278 * this routine will skip the search for an existing HPTE.
    279 * This procedure modifies r0, r3 - r6, r8, cr0.
    280 *  -- paulus.
    281 *
    282 * For speed, 4 of the instructions get patched once the size and
    283 * physical address of the hash table are known.  These definitions
    284 * of Hash_base and Hash_bits below are for the early hash table.
    285 */
    286Hash_base = early_hash
    287Hash_bits = 12				/* e.g. 256kB hash table */
    288Hash_msk = (((1 << Hash_bits) - 1) * 64)
    289
    290/* defines for the PTE format for 32-bit PPCs */
    291#define HPTE_SIZE	8
    292#define PTEG_SIZE	64
    293#define LG_PTEG_SIZE	6
    294#define LDPTEu		lwzu
    295#define LDPTE		lwz
    296#define STPTE		stw
    297#define CMPPTE		cmpw
    298#define PTE_H		0x40
    299#define PTE_V		0x80000000
    300#define TST_V(r)	rlwinm. r,r,0,0,0
    301#define SET_V(r)	oris r,r,PTE_V@h
    302#define CLR_V(r,t)	rlwinm r,r,0,1,31
    303
    304#define HASH_LEFT	31-(LG_PTEG_SIZE+Hash_bits-1)
    305#define HASH_RIGHT	31-LG_PTEG_SIZE
    306
    307__REF
    308_GLOBAL(create_hpte)
    309	/* Convert linux-style PTE (r5) to low word of PPC-style PTE (r8) */
    310	rlwinm	r8,r5,32-9,30,30	/* _PAGE_RW -> PP msb */
    311	rlwinm	r0,r5,32-6,30,30	/* _PAGE_DIRTY -> PP msb */
    312	and	r8,r8,r0		/* writable if _RW & _DIRTY */
    313	rlwimi	r5,r5,32-1,30,30	/* _PAGE_USER -> PP msb */
    314	rlwimi	r5,r5,32-2,31,31	/* _PAGE_USER -> PP lsb */
    315	ori	r8,r8,0xe04		/* clear out reserved bits */
    316	andc	r8,r5,r8		/* PP = user? (rw&dirty? 1: 3): 0 */
    317BEGIN_FTR_SECTION
    318	rlwinm	r8,r8,0,~_PAGE_COHERENT	/* clear M (coherence not required) */
    319END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
    320#ifdef CONFIG_PTE_64BIT
    321	/* Put the XPN bits into the PTE */
    322	rlwimi	r8,r10,8,20,22
    323	rlwimi	r8,r10,2,29,29
    324#endif
    325
    326	/* Construct the high word of the PPC-style PTE (r5) */
    327	rlwinm	r5,r3,7,1,24		/* put VSID in 0x7fffff80 bits */
    328	rlwimi	r5,r4,10,26,31		/* put in API (abbrev page index) */
    329	SET_V(r5)			/* set V (valid) bit */
    330
    331	patch_site	0f, patch__hash_page_A0
    332	patch_site	1f, patch__hash_page_A1
    333	patch_site	2f, patch__hash_page_A2
    334	/* Get the address of the primary PTE group in the hash table (r3) */
    3350:	lis	r0, (Hash_base - PAGE_OFFSET)@h	/* base address of hash table */
    3361:	rlwimi	r0,r3,LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT    /* VSID -> hash */
    3372:	rlwinm	r3,r4,20+LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT /* PI -> hash */
    338	xor	r3,r3,r0		/* make primary hash */
    339	li	r0,8			/* PTEs/group */
    340
    341	/*
    342	 * Test the _PAGE_HASHPTE bit in the old linux PTE, and skip the search
    343	 * if it is clear, meaning that the HPTE isn't there already...
    344	 */
    345	andi.	r6,r6,_PAGE_HASHPTE
    346	beq+	10f			/* no PTE: go look for an empty slot */
    347	tlbie	r4
    348
    349	/* Search the primary PTEG for a PTE whose 1st (d)word matches r5 */
    350	mtctr	r0
    351	addi	r4,r3,-HPTE_SIZE
    3521:	LDPTEu	r6,HPTE_SIZE(r4)	/* get next PTE */
    353	CMPPTE	0,r6,r5
    354	bdnzf	2,1b			/* loop while ctr != 0 && !cr0.eq */
    355	beq+	.Lfound_slot
    356
    357	patch_site	0f, patch__hash_page_B
    358	/* Search the secondary PTEG for a matching PTE */
    359	ori	r5,r5,PTE_H		/* set H (secondary hash) bit */
    3600:	xoris	r4,r3,Hash_msk>>16	/* compute secondary hash */
    361	xori	r4,r4,(-PTEG_SIZE & 0xffff)
    362	addi	r4,r4,-HPTE_SIZE
    363	mtctr	r0
    3642:	LDPTEu	r6,HPTE_SIZE(r4)
    365	CMPPTE	0,r6,r5
    366	bdnzf	2,2b
    367	beq+	.Lfound_slot
    368	xori	r5,r5,PTE_H		/* clear H bit again */
    369
    370	/* Search the primary PTEG for an empty slot */
    37110:	mtctr	r0
    372	addi	r4,r3,-HPTE_SIZE	/* search primary PTEG */
    3731:	LDPTEu	r6,HPTE_SIZE(r4)	/* get next PTE */
    374	TST_V(r6)			/* test valid bit */
    375	bdnzf	2,1b			/* loop while ctr != 0 && !cr0.eq */
    376	beq+	.Lfound_empty
    377
    378	patch_site	0f, patch__hash_page_C
    379	/* Search the secondary PTEG for an empty slot */
    380	ori	r5,r5,PTE_H		/* set H (secondary hash) bit */
    3810:	xoris	r4,r3,Hash_msk>>16	/* compute secondary hash */
    382	xori	r4,r4,(-PTEG_SIZE & 0xffff)
    383	addi	r4,r4,-HPTE_SIZE
    384	mtctr	r0
    3852:	LDPTEu	r6,HPTE_SIZE(r4)
    386	TST_V(r6)
    387	bdnzf	2,2b
    388	beq+	.Lfound_empty
    389	xori	r5,r5,PTE_H		/* clear H bit again */
    390
    391	/*
    392	 * Choose an arbitrary slot in the primary PTEG to overwrite.
    393	 * Since both the primary and secondary PTEGs are full, and we
    394	 * have no information that the PTEs in the primary PTEG are
    395	 * more important or useful than those in the secondary PTEG,
    396	 * and we know there is a definite (although small) speed
    397	 * advantage to putting the PTE in the primary PTEG, we always
    398	 * put the PTE in the primary PTEG.
    399	 */
    400
    401	lis	r4, (next_slot - PAGE_OFFSET)@ha	/* get next evict slot */
    402	lwz	r6, (next_slot - PAGE_OFFSET)@l(r4)
    403	addi	r6,r6,HPTE_SIZE			/* search for candidate */
    404	andi.	r6,r6,7*HPTE_SIZE
    405	stw	r6,next_slot@l(r4)
    406	add	r4,r3,r6
    407
    408#ifndef CONFIG_SMP
    409	/* Store PTE in PTEG */
    410.Lfound_empty:
    411	STPTE	r5,0(r4)
    412.Lfound_slot:
    413	STPTE	r8,HPTE_SIZE/2(r4)
    414
    415#else /* CONFIG_SMP */
    416/*
    417 * Between the tlbie above and updating the hash table entry below,
    418 * another CPU could read the hash table entry and put it in its TLB.
    419 * There are 3 cases:
    420 * 1. using an empty slot
    421 * 2. updating an earlier entry to change permissions (i.e. enable write)
    422 * 3. taking over the PTE for an unrelated address
    423 *
    424 * In each case it doesn't really matter if the other CPUs have the old
    425 * PTE in their TLB.  So we don't need to bother with another tlbie here,
    426 * which is convenient as we've overwritten the register that had the
    427 * address. :-)  The tlbie above is mainly to make sure that this CPU comes
    428 * and gets the new PTE from the hash table.
    429 *
    430 * We do however have to make sure that the PTE is never in an invalid
    431 * state with the V bit set.
    432 */
    433.Lfound_empty:
    434.Lfound_slot:
    435	CLR_V(r5,r0)		/* clear V (valid) bit in PTE */
    436	STPTE	r5,0(r4)
    437	sync
    438	TLBSYNC
    439	STPTE	r8,HPTE_SIZE/2(r4) /* put in correct RPN, WIMG, PP bits */
    440	sync
    441	SET_V(r5)
    442	STPTE	r5,0(r4)	/* finally set V bit in PTE */
    443#endif /* CONFIG_SMP */
    444
    445	sync		/* make sure pte updates get to memory */
    446	blr
    447	.previous
    448_ASM_NOKPROBE_SYMBOL(create_hpte)
    449
    450	.section .bss
    451	.align	2
    452next_slot:
    453	.space	4
    454	.previous
    455
    456/*
    457 * Flush the entry for a particular page from the hash table.
    458 *
    459 * flush_hash_pages(unsigned context, unsigned long va, unsigned long pmdval,
    460 *		    int count)
    461 *
    462 * We assume that there is a hash table in use (Hash != 0).
    463 */
    464__REF
    465_GLOBAL(flush_hash_pages)
    466	/*
    467	 * We disable interrupts here, even on UP, because we want
    468	 * the _PAGE_HASHPTE bit to be a reliable indication of
    469	 * whether the HPTE exists (or at least whether one did once).
    470	 * We also turn off the MMU for data accesses so that we
    471	 * we can't take a hash table miss (assuming the code is
    472	 * covered by a BAT).  -- paulus
    473	 */
    474	mfmsr	r10
    475	rlwinm	r0,r10,0,17,15		/* clear bit 16 (MSR_EE) */
    476	rlwinm	r0,r0,0,28,26		/* clear MSR_DR */
    477	mtmsr	r0
    478	isync
    479
    480	/* First find a PTE in the range that has _PAGE_HASHPTE set */
    481#ifndef CONFIG_PTE_64BIT
    482	rlwimi	r5,r4,22,20,29
    483#else
    484	rlwimi	r5,r4,23,20,28
    485	addi	r5,r5,PTE_FLAGS_OFFSET
    486#endif
    4871:	lwz	r0,0(r5)
    488	cmpwi	cr1,r6,1
    489	andi.	r0,r0,_PAGE_HASHPTE
    490	bne	2f
    491	ble	cr1,19f
    492	addi	r4,r4,0x1000
    493	addi	r5,r5,PTE_T_SIZE
    494	addi	r6,r6,-1
    495	b	1b
    496
    497	/* Convert context and va to VSID */
    4982:	mulli	r3,r3,897*16		/* multiply context by context skew */
    499	rlwinm	r0,r4,4,28,31		/* get ESID (top 4 bits of va) */
    500	mulli	r0,r0,0x111		/* multiply by ESID skew */
    501	add	r3,r3,r0		/* note code below trims to 24 bits */
    502
    503	/* Construct the high word of the PPC-style PTE (r11) */
    504	rlwinm	r11,r3,7,1,24		/* put VSID in 0x7fffff80 bits */
    505	rlwimi	r11,r4,10,26,31		/* put in API (abbrev page index) */
    506	SET_V(r11)			/* set V (valid) bit */
    507
    508#ifdef CONFIG_SMP
    509	lis	r9, (mmu_hash_lock - PAGE_OFFSET)@ha
    510	addi	r9, r9, (mmu_hash_lock - PAGE_OFFSET)@l
    511	tophys	(r8, r2)
    512	lwz	r8, TASK_CPU(r8)
    513	oris	r8,r8,9
    51410:	lwarx	r0,0,r9
    515	cmpi	0,r0,0
    516	bne-	11f
    517	stwcx.	r8,0,r9
    518	beq+	12f
    51911:	lwz	r0,0(r9)
    520	cmpi	0,r0,0
    521	beq	10b
    522	b	11b
    52312:	isync
    524#endif
    525
    526	/*
    527	 * Check the _PAGE_HASHPTE bit in the linux PTE.  If it is
    528	 * already clear, we're done (for this pte).  If not,
    529	 * clear it (atomically) and proceed.  -- paulus.
    530	 */
    53133:	lwarx	r8,0,r5			/* fetch the pte flags word */
    532	andi.	r0,r8,_PAGE_HASHPTE
    533	beq	8f			/* done if HASHPTE is already clear */
    534	rlwinm	r8,r8,0,31,29		/* clear HASHPTE bit */
    535	stwcx.	r8,0,r5			/* update the pte */
    536	bne-	33b
    537
    538	patch_site	0f, patch__flush_hash_A0
    539	patch_site	1f, patch__flush_hash_A1
    540	patch_site	2f, patch__flush_hash_A2
    541	/* Get the address of the primary PTE group in the hash table (r3) */
    5420:	lis	r8, (Hash_base - PAGE_OFFSET)@h	/* base address of hash table */
    5431:	rlwimi	r8,r3,LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT    /* VSID -> hash */
    5442:	rlwinm	r0,r4,20+LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT /* PI -> hash */
    545	xor	r8,r0,r8		/* make primary hash */
    546
    547	/* Search the primary PTEG for a PTE whose 1st (d)word matches r5 */
    548	li	r0,8			/* PTEs/group */
    549	mtctr	r0
    550	addi	r12,r8,-HPTE_SIZE
    5511:	LDPTEu	r0,HPTE_SIZE(r12)	/* get next PTE */
    552	CMPPTE	0,r0,r11
    553	bdnzf	2,1b			/* loop while ctr != 0 && !cr0.eq */
    554	beq+	3f
    555
    556	patch_site	0f, patch__flush_hash_B
    557	/* Search the secondary PTEG for a matching PTE */
    558	ori	r11,r11,PTE_H		/* set H (secondary hash) bit */
    559	li	r0,8			/* PTEs/group */
    5600:	xoris	r12,r8,Hash_msk>>16	/* compute secondary hash */
    561	xori	r12,r12,(-PTEG_SIZE & 0xffff)
    562	addi	r12,r12,-HPTE_SIZE
    563	mtctr	r0
    5642:	LDPTEu	r0,HPTE_SIZE(r12)
    565	CMPPTE	0,r0,r11
    566	bdnzf	2,2b
    567	xori	r11,r11,PTE_H		/* clear H again */
    568	bne-	4f			/* should rarely fail to find it */
    569
    5703:	li	r0,0
    571	STPTE	r0,0(r12)		/* invalidate entry */
    5724:	sync
    573	tlbie	r4			/* in hw tlb too */
    574	sync
    575
    5768:	ble	cr1,9f			/* if all ptes checked */
    57781:	addi	r6,r6,-1
    578	addi	r5,r5,PTE_T_SIZE
    579	addi	r4,r4,0x1000
    580	lwz	r0,0(r5)		/* check next pte */
    581	cmpwi	cr1,r6,1
    582	andi.	r0,r0,_PAGE_HASHPTE
    583	bne	33b
    584	bgt	cr1,81b
    585
    5869:
    587#ifdef CONFIG_SMP
    588	TLBSYNC
    589	li	r0,0
    590	stw	r0,0(r9)		/* clear mmu_hash_lock */
    591#endif
    592
    59319:	mtmsr	r10
    594	isync
    595	blr
    596	.previous
    597EXPORT_SYMBOL(flush_hash_pages)
    598_ASM_NOKPROBE_SYMBOL(flush_hash_pages)