/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * (C) Copyright 2013
 * David Feng <fenghua@phytium.com.cn>
 *
 * This file is based on sample code from ARMv8 ARM.
 */

#include <asm-offsets.h>
#include <config.h>
#include <asm/macro.h>
#include <asm/system.h>
#include <linux/linkage.h>

/*
 * void __asm_dcache_level(level)
 *
 * flush or invalidate one cache level by set/way.
 *
 * x0: cache level
 * x1: 0 clean & invalidate, 1 invalidate only
 * x2~x9: clobbered
 */
.pushsection .text.__asm_dcache_level, "ax"
ENTRY(__asm_dcache_level)
	lsl	x12, x0, #1
	msr	csselr_el1, x12		/* select cache level */
	isb				/* sync change of csselr_el1 */
	mrs	x6, ccsidr_el1		/* read the new ccsidr_el1 */
	and	x2, x6, #7		/* x2 <- log2(cache line size)-4 */
	add	x2, x2, #4		/* x2 <- log2(cache line size) */
	mov	x3, #0x3ff
	and	x3, x3, x6, lsr #3	/* x3 <- number of ways - 1 */
	clz	w5, w3			/* bit position of #ways */
	mov	x4, #0x7fff
	and	x4, x4, x6, lsr #13	/* x4 <- number of sets - 1 */
	/* x12 <- cache level << 1 */
	/* x2 <- line length offset */
	/* x3 <- number of cache ways - 1 */
	/* x4 <- number of cache sets - 1 */
	/* x5 <- bit position of #ways */

loop_set:
	mov	x6, x3			/* x6 <- working copy of #ways */
loop_way:
	lsl	x7, x6, x5
	orr	x9, x12, x7		/* map way and level to cisw value */
	lsl	x7, x4, x2
	orr	x9, x9, x7		/* map set number to cisw value */
	tbz	w1, #0, 1f
	dc	isw, x9			/* invalidate by set/way */
	b	2f
1:	dc	cisw, x9		/* clean & invalidate by set/way */
2:	subs	x6, x6, #1		/* decrement the way */
	b.ge	loop_way
	subs	x4, x4, #1		/* decrement the set */
	b.ge	loop_set

	ret
ENDPROC(__asm_dcache_level)
.popsection
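/*
 * Reference sketch of the field layouts __asm_dcache_level relies on
 * (ARMv8 ARM; CCSIDR_EL1 shown in its 32-bit format, i.e. without
 * FEAT_CCIDX). This block is informational only and assembles to nothing:
 *
 *	CCSIDR_EL1[2:0]		LineSize: log2(line size in bytes) - 4
 *	CCSIDR_EL1[12:3]	Associativity: number of ways - 1
 *	CCSIDR_EL1[27:13]	NumSets: number of sets - 1
 *
 * The operand built for "dc cisw"/"dc isw" packs the same numbers back
 * together:
 *
 *	Xt[3:1]		cache level << 1, as written to csselr_el1
 *	Xt[31:32-A]	way number, where A = ceil(log2(#ways)), so the
 *			shift amount 32 - A is exactly clz(#ways - 1)
 *	Xt[B-1:L]	set number, where L = log2(line size) and
 *			B = L + ceil(log2(#sets))
 */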
/*
 * void __asm_dcache_all(int invalidate_only)
 *
 * x0: 0 clean & invalidate, 1 invalidate only
 *
 * flush or invalidate all data cache levels up to LoC by set/way.
 */
.pushsection .text.__asm_dcache_all, "ax"
ENTRY(__asm_dcache_all)
	mov	x1, x0
	dsb	sy
	mrs	x10, clidr_el1		/* read clidr_el1 */
	lsr	x11, x10, #24
	and	x11, x11, #0x7		/* x11 <- loc (level of coherence) */
	cbz	x11, finished		/* if loc is 0, exit */
	mov	x15, lr
	mov	x0, #0			/* start flush at cache level 0 */
	/* x0  <- cache level */
	/* x10 <- clidr_el1 */
	/* x11 <- loc */
	/* x15 <- return address */

loop_level:
	lsl	x12, x0, #1
	add	x12, x12, x0		/* x12 <- tripled cache level */
	lsr	x12, x10, x12
	and	x12, x12, #7		/* x12 <- cache type */
	cmp	x12, #2
	b.lt	skip			/* skip if no cache or icache only */
	bl	__asm_dcache_level	/* x1 = 0 flush, 1 invalidate */
skip:
	add	x0, x0, #1		/* increment cache level */
	cmp	x11, x0
	b.gt	loop_level

	mov	x0, #0
	msr	csselr_el1, x0		/* restore csselr_el1 */
	dsb	sy
	isb
	mov	lr, x15

finished:
	ret
ENDPROC(__asm_dcache_all)
.popsection

.pushsection .text.__asm_flush_dcache_all, "ax"
ENTRY(__asm_flush_dcache_all)
	mov	x0, #0
	b	__asm_dcache_all
ENDPROC(__asm_flush_dcache_all)
.popsection

.pushsection .text.__asm_invalidate_dcache_all, "ax"
ENTRY(__asm_invalidate_dcache_all)
	mov	x0, #0x1
	b	__asm_dcache_all
ENDPROC(__asm_invalidate_dcache_all)
.popsection

/*
 * void __asm_flush_dcache_range(start, end)
 *
 * clean & invalidate data cache in the range [start, end)
 *
 * x0: start address
 * x1: end address (exclusive)
 */
.pushsection .text.__asm_flush_dcache_range, "ax"
ENTRY(__asm_flush_dcache_range)
	mrs	x3, ctr_el0
	lsr	x3, x3, #16
	and	x3, x3, #0xf		/* x3 <- DminLine from ctr_el0 */
	mov	x2, #4
	lsl	x2, x2, x3		/* x2 <- minimal cache line size */
	sub	x3, x2, #1
	bic	x0, x0, x3		/* align start to a line boundary */
1:	dc	civac, x0		/* clean & invalidate data or unified cache */
	add	x0, x0, x2
	cmp	x0, x1
	b.lo	1b
	dsb	sy
	ret
ENDPROC(__asm_flush_dcache_range)
.popsection

/*
 * void __asm_invalidate_dcache_range(start, end)
 *
 * invalidate data cache in the range [start, end)
 *
 * x0: start address
 * x1: end address (exclusive)
 */
.pushsection .text.__asm_invalidate_dcache_range, "ax"
ENTRY(__asm_invalidate_dcache_range)
	mrs	x3, ctr_el0
	ubfm	x3, x3, #16, #19	/* x3 <- DminLine from ctr_el0 */
	mov	x2, #4
	lsl	x2, x2, x3		/* x2 <- minimal cache line size */
	sub	x3, x2, #1
	bic	x0, x0, x3		/* align start to a line boundary */
1:	dc	ivac, x0		/* invalidate data or unified cache */
	add	x0, x0, x2
	cmp	x0, x1
	b.lo	1b
	dsb	sy
	ret
ENDPROC(__asm_invalidate_dcache_range)
.popsection

/*
 * void __asm_invalidate_icache_all(void)
 *
 * invalidate all instruction cache entries (inner shareable).
 */
.pushsection .text.__asm_invalidate_icache_all, "ax"
ENTRY(__asm_invalidate_icache_all)
	ic	ialluis
	isb	sy
	ret
ENDPROC(__asm_invalidate_icache_all)
.popsection

.pushsection .text.__asm_invalidate_l3_dcache, "ax"
ENTRY(__asm_invalidate_l3_dcache)
	mov	x0, #0			/* return status as success */
	ret
ENDPROC(__asm_invalidate_l3_dcache)
	.weak	__asm_invalidate_l3_dcache
.popsection

.pushsection .text.__asm_flush_l3_dcache, "ax"
ENTRY(__asm_flush_l3_dcache)
	mov	x0, #0			/* return status as success */
	ret
ENDPROC(__asm_flush_l3_dcache)
	.weak	__asm_flush_l3_dcache
.popsection

.pushsection .text.__asm_invalidate_l3_icache, "ax"
ENTRY(__asm_invalidate_l3_icache)
	mov	x0, #0			/* return status as success */
	ret
ENDPROC(__asm_invalidate_l3_icache)
	.weak	__asm_invalidate_l3_icache
.popsection
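/*
 * The three L3 stubs above just report success: set/way maintenance
 * only reaches the architected (CPU-internal) cache levels, so parts
 * without an outer L3 cache need nothing more. Because the symbols are
 * declared .weak, SoCs that do have one (e.g. some NXP Layerscape parts
 * with a CCN-504 interconnect) can provide strong definitions that
 * perform the real maintenance.
 */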
/*
 * void __asm_switch_ttbr(ulong new_ttbr)
 *
 * Safely switches to a new page table.
 */
.pushsection .text.__asm_switch_ttbr, "ax"
ENTRY(__asm_switch_ttbr)
	/* x2 = SCTLR (alive throughout the function) */
	switch_el x4, 3f, 2f, 1f
3:	mrs	x2, sctlr_el3
	b	0f
2:	mrs	x2, sctlr_el2
	b	0f
1:	mrs	x2, sctlr_el1
0:

	/* Unset CR_M | CR_C | CR_I from SCTLR to disable all caches */
	movn	x1, #(CR_M | CR_C | CR_I)
	and	x1, x2, x1
	switch_el x4, 3f, 2f, 1f
3:	msr	sctlr_el3, x1
	b	0f
2:	msr	sctlr_el2, x1
	b	0f
1:	msr	sctlr_el1, x1
0:	isb

	/* This call only clobbers x30 (lr) and x9 (unused) */
	mov	x3, x30
	bl	__asm_invalidate_tlb_all

	/* From here on we're running safely with caches disabled */

	/* Set TTBR to our first argument */
	switch_el x4, 3f, 2f, 1f
3:	msr	ttbr0_el3, x0
	b	0f
2:	msr	ttbr0_el2, x0
	b	0f
1:	msr	ttbr0_el1, x0
0:	isb

	/* Restore original SCTLR and thus enable caches again */
	switch_el x4, 3f, 2f, 1f
3:	msr	sctlr_el3, x2
	b	0f
2:	msr	sctlr_el2, x2
	b	0f
1:	msr	sctlr_el1, x2
0:	isb

	ret	x3
ENDPROC(__asm_switch_ttbr)
.popsection
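/*
 * Informational sketch of the C-side view of the routine above (the
 * actual prototype lives in the arch headers; this block is a comment
 * only and does not affect the build):
 *
 *	void __asm_switch_ttbr(u64 new_ttbr);
 *
 * The SCTLR M/C/I bits are cleared first so that no speculative table
 * walks or cache fills can use stale translations while TTBR0 is being
 * replaced; the TLB invalidation in the middle completes the
 * break-before-make sequence before the MMU and caches are re-enabled
 * from the saved SCTLR value.
 */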