clear_page.S
/* SPDX-License-Identifier: GPL-2.0 */
/* clear_page.S: UltraSparc optimized clear page.
 *
 * Copyright (C) 1996, 1998, 1999, 2000, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 1997 Jakub Jelinek (jakub@redhat.com)
 */

#include <linux/pgtable.h>
#include <asm/visasm.h>
#include <asm/thread_info.h>
#include <asm/page.h>
#include <asm/spitfire.h>
#include <asm/head.h>
#include <asm/export.h>

	/* What we used to do was lock a TLB entry into a specific
	 * TLB slot, clear the page with interrupts disabled, then
	 * restore the original TLB entry.  This was great for
	 * disturbing the TLB as little as possible, but it meant
	 * we had to keep interrupts disabled for a long time.
	 *
	 * Now, we simply use the normal TLB loading mechanism,
	 * and this makes the cpu choose a slot all by itself.
	 * Then we do a normal TLB flush on exit.  We need only
	 * disable preemption during the clear.
	 */

	.text

	.globl		_clear_page
	EXPORT_SYMBOL(_clear_page)
_clear_page:		/* %o0=dest */
	ba,pt		%xcc, clear_page_common
	 clr		%o4

	/* This thing is pretty important, it shows up
	 * on the profiles via do_anonymous_page().
	 */
	.align		32
	.globl		clear_user_page
	EXPORT_SYMBOL(clear_user_page)
clear_user_page:	/* %o0=dest, %o1=vaddr */
	lduw		[%g6 + TI_PRE_COUNT], %o2
	sethi		%hi(PAGE_OFFSET), %g2
	sethi		%hi(PAGE_SIZE), %o4

	ldx		[%g2 + %lo(PAGE_OFFSET)], %g2
	sethi		%hi(PAGE_KERNEL_LOCKED), %g3

	ldx		[%g3 + %lo(PAGE_KERNEL_LOCKED)], %g3
	sub		%o0, %g2, %g1		! paddr

	and		%o1, %o4, %o0		! vaddr D-cache alias bit

	or		%g1, %g3, %g1		! TTE data
	sethi		%hi(TLBTEMP_BASE), %o3

	add		%o2, 1, %o4
	add		%o0, %o3, %o0		! TTE vaddr

	/* Disable preemption.  */
	mov		TLB_TAG_ACCESS, %g3
	stw		%o4, [%g6 + TI_PRE_COUNT]

	/* Load TLB entry.  */
	rdpr		%pstate, %o4
	wrpr		%o4, PSTATE_IE, %pstate
	stxa		%o0, [%g3] ASI_DMMU
	stxa		%g1, [%g0] ASI_DTLB_DATA_IN
	sethi		%hi(KERNBASE), %g1
	flush		%g1
	wrpr		%o4, 0x0, %pstate

	mov		1, %o4

clear_page_common:
	VISEntryHalf
	membar		#StoreLoad | #StoreStore | #LoadStore
	fzero		%f0
	sethi		%hi(PAGE_SIZE/64), %o1
	mov		%o0, %g1		! remember vaddr for tlbflush
	fzero		%f2
	or		%o1, %lo(PAGE_SIZE/64), %o1
	faddd		%f0, %f2, %f4
	fmuld		%f0, %f2, %f6
	faddd		%f0, %f2, %f8
	fmuld		%f0, %f2, %f10

	faddd		%f0, %f2, %f12
	fmuld		%f0, %f2, %f14
1:	stda		%f0, [%o0 + %g0] ASI_BLK_P
	subcc		%o1, 1, %o1
	bne,pt		%icc, 1b
	 add		%o0, 0x40, %o0
	membar		#Sync
	VISExitHalf

	brz,pn		%o4, out
	 nop

	stxa		%g0, [%g1] ASI_DMMU_DEMAP
	membar		#Sync
	stw		%o2, [%g6 + TI_PRE_COUNT]

out:	retl
	 nop
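For orientation, below is a rough C-level sketch of the clear_user_page path in the listing above: compute the physical address and the D-cache alias bit, install a temporary D-TLB mapping at TLBTEMP_BASE, zero the page through that mapping with 64-byte block stores, then demap it and restore the preempt count. This is only an illustration of the flow, not kernel code; the helpers load_dtlb_entry(), vis_block_clear() and dtlb_demap() are invented stand-ins for the stxa/membar sequences in the assembly.

/* Hypothetical sketch only -- the three helper functions below do not exist
 * in the kernel; they stand in for the privileged instruction sequences in
 * clear_page.S above.
 */
static void clear_user_page_sketch(void *dest, unsigned long vaddr)
{
	unsigned long paddr  = (unsigned long)dest - PAGE_OFFSET;        /* linear-map address -> physical */
	unsigned long alias  = vaddr & PAGE_SIZE;                        /* D-cache alias (colour) bit of the user vaddr */
	unsigned long tmp_va = TLBTEMP_BASE + alias;                     /* vaddr of the temporary mapping */
	unsigned long tte    = paddr | pgprot_val(PAGE_KERNEL_LOCKED);   /* TTE data for that mapping */

	preempt_disable();                    /* the lduw/stw of TI_PRE_COUNT */
	load_dtlb_entry(tmp_va, tte);         /* stxa to TLB_TAG_ACCESS / ASI_DTLB_DATA_IN with IE masked */
	vis_block_clear(tmp_va, PAGE_SIZE);   /* clear_page_common: zeroed %f0-%f14, 64-byte ASI_BLK_P stores */
	dtlb_demap(tmp_va);                   /* stxa to ASI_DMMU_DEMAP + membar #Sync */
	preempt_enable_no_resched();          /* put back the saved preempt count */
}

The plain _clear_page entry runs the same block-clear loop but with %o4 cleared, so it skips the temporary mapping and the final demap entirely: its destination is already reachable through the kernel linear mapping.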