/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _ASM_POWERPC_BOOK3S_64_MMU_HASH_H_
#define _ASM_POWERPC_BOOK3S_64_MMU_HASH_H_
/*
 * PowerPC64 memory management structures
 *
 * Dave Engebretsen & Mike Corrigan <{engebret|mikejc}@us.ibm.com>
 *   PPC64 rework.
 */

#include <asm/page.h>
#include <asm/bug.h>
#include <asm/asm-const.h>

/*
 * This is necessary to get the definition of PGTABLE_RANGE which we
 * need for various slices related matters. Note that this isn't the
 * complete pgtable.h but only a portion of it.
 */
#include <asm/book3s/64/pgtable.h>
#include <asm/task_size_64.h>
#include <asm/cpu_has_feature.h>

/*
 * SLB
 */

#define SLB_NUM_BOLTED		2
#define SLB_CACHE_ENTRIES	8
#define SLB_MIN_SIZE		32

/* Bits in the SLB ESID word */
#define SLB_ESID_V		ASM_CONST(0x0000000008000000) /* valid */

/* Bits in the SLB VSID word */
#define SLB_VSID_SHIFT		12
#define SLB_VSID_SHIFT_256M	SLB_VSID_SHIFT
#define SLB_VSID_SHIFT_1T	24
#define SLB_VSID_SSIZE_SHIFT	62
#define SLB_VSID_B		ASM_CONST(0xc000000000000000)
#define SLB_VSID_B_256M		ASM_CONST(0x0000000000000000)
#define SLB_VSID_B_1T		ASM_CONST(0x4000000000000000)
#define SLB_VSID_KS		ASM_CONST(0x0000000000000800)
#define SLB_VSID_KP		ASM_CONST(0x0000000000000400)
#define SLB_VSID_N		ASM_CONST(0x0000000000000200) /* no-execute */
#define SLB_VSID_L		ASM_CONST(0x0000000000000100)
#define SLB_VSID_C		ASM_CONST(0x0000000000000080) /* class */
#define SLB_VSID_LP		ASM_CONST(0x0000000000000030)
#define SLB_VSID_LP_00		ASM_CONST(0x0000000000000000)
#define SLB_VSID_LP_01		ASM_CONST(0x0000000000000010)
#define SLB_VSID_LP_10		ASM_CONST(0x0000000000000020)
#define SLB_VSID_LP_11		ASM_CONST(0x0000000000000030)
#define SLB_VSID_LLP		(SLB_VSID_L|SLB_VSID_LP)

#define SLB_VSID_KERNEL		(SLB_VSID_KP)
#define SLB_VSID_USER		(SLB_VSID_KP|SLB_VSID_KS|SLB_VSID_C)

#define SLBIE_C			(0x08000000)
#define SLBIE_SSIZE_SHIFT	25

/*
 * Hash table
 */

#define HPTES_PER_GROUP	8

#define HPTE_V_SSIZE_SHIFT	62
#define HPTE_V_AVPN_SHIFT	7
#define HPTE_V_COMMON_BITS	ASM_CONST(0x000fffffffffffff)
#define HPTE_V_AVPN		ASM_CONST(0x3fffffffffffff80)
#define HPTE_V_AVPN_3_0		ASM_CONST(0x000fffffffffff80)
#define HPTE_V_AVPN_VAL(x)	(((x) & HPTE_V_AVPN) >> HPTE_V_AVPN_SHIFT)
#define HPTE_V_COMPARE(x,y)	(!(((x) ^ (y)) & 0xffffffffffffff80UL))
#define HPTE_V_BOLTED		ASM_CONST(0x0000000000000010)
#define HPTE_V_LOCK		ASM_CONST(0x0000000000000008)
#define HPTE_V_LARGE		ASM_CONST(0x0000000000000004)
#define HPTE_V_SECONDARY	ASM_CONST(0x0000000000000002)
#define HPTE_V_VALID		ASM_CONST(0x0000000000000001)

/*
 * ISA 3.0 has a different HPTE format.
 */
#define HPTE_R_3_0_SSIZE_SHIFT	58
#define HPTE_R_3_0_SSIZE_MASK	(3ull << HPTE_R_3_0_SSIZE_SHIFT)
#define HPTE_R_PP0		ASM_CONST(0x8000000000000000)
#define HPTE_R_TS		ASM_CONST(0x4000000000000000)
#define HPTE_R_KEY_HI		ASM_CONST(0x3000000000000000)
#define HPTE_R_KEY_BIT4		ASM_CONST(0x2000000000000000)
#define HPTE_R_KEY_BIT3		ASM_CONST(0x1000000000000000)
#define HPTE_R_RPN_SHIFT	12
#define HPTE_R_RPN		ASM_CONST(0x0ffffffffffff000)
#define HPTE_R_RPN_3_0		ASM_CONST(0x01fffffffffff000)
#define HPTE_R_PP		ASM_CONST(0x0000000000000003)
#define HPTE_R_PPP		ASM_CONST(0x8000000000000003)
#define HPTE_R_N		ASM_CONST(0x0000000000000004)
#define HPTE_R_G		ASM_CONST(0x0000000000000008)
#define HPTE_R_M		ASM_CONST(0x0000000000000010)
#define HPTE_R_I		ASM_CONST(0x0000000000000020)
#define HPTE_R_W		ASM_CONST(0x0000000000000040)
#define HPTE_R_WIMG		ASM_CONST(0x0000000000000078)
#define HPTE_R_C		ASM_CONST(0x0000000000000080)
#define HPTE_R_R		ASM_CONST(0x0000000000000100)
#define HPTE_R_KEY_LO		ASM_CONST(0x0000000000000e00)
#define HPTE_R_KEY_BIT2		ASM_CONST(0x0000000000000800)
#define HPTE_R_KEY_BIT1		ASM_CONST(0x0000000000000400)
#define HPTE_R_KEY_BIT0		ASM_CONST(0x0000000000000200)
#define HPTE_R_KEY		(HPTE_R_KEY_LO | HPTE_R_KEY_HI)

#define HPTE_V_1TB_SEG		ASM_CONST(0x4000000000000000)
#define HPTE_V_VRMA_MASK	ASM_CONST(0x4001ffffff000000)

/* Values for PP (assumes Ks=0, Kp=1) */
#define PP_RWXX	0	/* Supervisor read/write, User none */
#define PP_RWRX	1	/* Supervisor read/write, User read */
#define PP_RWRW	2	/* Supervisor read/write, User read/write */
#define PP_RXRX	3	/* Supervisor read,       User read */
#define PP_RXXX	(HPTE_R_PP0 | 2)	/* Supervisor read, user none */

/* Fields for tlbiel instruction in architecture 2.06 */
#define TLBIEL_INVAL_SEL_MASK	0xc00	/* invalidation selector */
#define TLBIEL_INVAL_PAGE	0x000	/* invalidate a single page */
#define TLBIEL_INVAL_SET_LPID	0x800	/* invalidate a set for current LPID */
#define TLBIEL_INVAL_SET	0xc00	/* invalidate a set for all LPIDs */
#define TLBIEL_INVAL_SET_MASK	0xfff000	/* set number to inval. */
#define TLBIEL_INVAL_SET_SHIFT	12
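
/*
 * A sketch of how these fields are typically used (illustrative, not a
 * fixed API): to flush every set for the current LPID, loop over the
 * set numbers in the RB operand of tlbiel:
 *
 *	rb = TLBIEL_INVAL_SET_LPID;
 *	for (i = 0; i < POWER7_TLB_SETS; i++) {
 *		asm volatile("tlbiel %0" : : "r" (rb));
 *		rb += 1 << TLBIEL_INVAL_SET_SHIFT;
 *	}
 */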

#define POWER7_TLB_SETS		128	/* # sets in POWER7 TLB */
#define POWER8_TLB_SETS		512	/* # sets in POWER8 TLB */
#define POWER9_TLB_SETS_HASH	256	/* # sets in POWER9 TLB Hash mode */
#define POWER9_TLB_SETS_RADIX	128	/* # sets in POWER9 TLB Radix mode */

#ifndef __ASSEMBLY__

struct mmu_hash_ops {
	void (*hpte_invalidate)(unsigned long slot,
				unsigned long vpn,
				int bpsize, int apsize,
				int ssize, int local);
	long (*hpte_updatepp)(unsigned long slot,
			      unsigned long newpp,
			      unsigned long vpn,
			      int bpsize, int apsize,
			      int ssize, unsigned long flags);
	void (*hpte_updateboltedpp)(unsigned long newpp,
				    unsigned long ea,
				    int psize, int ssize);
	long (*hpte_insert)(unsigned long hpte_group,
			    unsigned long vpn,
			    unsigned long prpn,
			    unsigned long rflags,
			    unsigned long vflags,
			    int psize, int apsize,
			    int ssize);
	long (*hpte_remove)(unsigned long hpte_group);
	int (*hpte_removebolted)(unsigned long ea,
				 int psize, int ssize);
	void (*flush_hash_range)(unsigned long number, int local);
	void (*hugepage_invalidate)(unsigned long vsid,
				    unsigned long addr,
				    unsigned char *hpte_slot_array,
				    int psize, int ssize, int local);
	int (*resize_hpt)(unsigned long shift);
	/*
	 * Special for kexec.
	 * To be called in real mode with interrupts disabled. No locks are
	 * taken, so concurrent access on pre-POWER5 hardware could result
	 * in a deadlock.
	 * The linear mapping is destroyed as well.
	 */
	void (*hpte_clear_all)(void);
};
extern struct mmu_hash_ops mmu_hash_ops;

struct hash_pte {
	__be64 v;
	__be64 r;
};

extern struct hash_pte *htab_address;
extern unsigned long htab_size_bytes;
extern unsigned long htab_hash_mask;


static inline int shift_to_mmu_psize(unsigned int shift)
{
	int psize;

	for (psize = 0; psize < MMU_PAGE_COUNT; ++psize)
		if (mmu_psize_defs[psize].shift == shift)
			return psize;
	return -1;
}

static inline unsigned int mmu_psize_to_shift(unsigned int mmu_psize)
{
	if (mmu_psize_defs[mmu_psize].shift)
		return mmu_psize_defs[mmu_psize].shift;
	BUG();
}

static inline unsigned int ap_to_shift(unsigned long ap)
{
	int psize;

	for (psize = 0; psize < MMU_PAGE_COUNT; psize++) {
		if (mmu_psize_defs[psize].ap == ap)
			return mmu_psize_defs[psize].shift;
	}

	return -1;
}

static inline unsigned long get_sllp_encoding(int psize)
{
	unsigned long sllp;

	sllp = ((mmu_psize_defs[psize].sllp & SLB_VSID_L) >> 6) |
		((mmu_psize_defs[psize].sllp & SLB_VSID_LP) >> 4);
	return sllp;
}

#endif /* __ASSEMBLY__ */

/*
 * Segment sizes.
 * These are the values used by hardware in the B field of
 * SLB entries and the first dword of MMU hashtable entries.
 * The B field is 2 bits; the values 2 and 3 are unused and reserved.
 */
#define MMU_SEGSIZE_256M	0
#define MMU_SEGSIZE_1T		1

/*
 * Encoded virtual page number shift.
 * To fit the 78-bit VA in a 64-bit variable we shift the VA by 12 bits,
 * which lets us address up to a 76-bit VA.
 * For the HPT hash we can ignore the page size bits of the VA, and for
 * HPTE encoding we ignore up to 23 bits of the VA, so ignoring the lower
 * 12 bits works in all cases, including 4k page size.
 */
#define VPN_SHIFT	12
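
/*
 * Illustration (mirroring hpt_vpn() below): for a 256MB segment
 * (SID_SHIFT == 28), the VPN concatenates the VSID with the
 * non-segment bits of the EA, both shifted down by VPN_SHIFT:
 *
 *	vpn = (vsid << (28 - 12)) | ((ea >> 12) & ((1ul << 16) - 1));
 */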

/*
 * HPTE Large Page (LP) details
 */
#define LP_SHIFT	12
#define LP_BITS		8
#define LP_MASK(i)	((0xFF >> (i)) << LP_SHIFT)
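/* e.g. LP_MASK(0) == 0xff000, LP_MASK(4) == 0x0f000 */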

#ifndef __ASSEMBLY__

static inline int slb_vsid_shift(int ssize)
{
	if (ssize == MMU_SEGSIZE_256M)
		return SLB_VSID_SHIFT;
	return SLB_VSID_SHIFT_1T;
}

static inline int segment_shift(int ssize)
{
	if (ssize == MMU_SEGSIZE_256M)
		return SID_SHIFT;
	return SID_SHIFT_1T;
}

/*
 * This array is indexed by the LP field of the HPTE second dword.
 * Since this field may contain some RPN bits, some entries are
 * replicated so that we get the same value irrespective of RPN.
 * The top 4 bits are the page size index (MMU_PAGE_*) for the
 * actual page size, the bottom 4 bits are the base page size.
 */
extern u8 hpte_page_sizes[1 << LP_BITS];

static inline unsigned long __hpte_page_size(unsigned long h, unsigned long l,
					     bool is_base_size)
{
	unsigned int i, lp;

	if (!(h & HPTE_V_LARGE))
		return 1ul << 12;

	/* Look at the 8 bit LP value */
	lp = (l >> LP_SHIFT) & ((1 << LP_BITS) - 1);
	i = hpte_page_sizes[lp];
	if (!i)
		return 0;
	if (!is_base_size)
		i >>= 4;
	return 1ul << mmu_psize_defs[i & 0xf].shift;
}

static inline unsigned long hpte_page_size(unsigned long h, unsigned long l)
{
	return __hpte_page_size(h, l, 0);
}

static inline unsigned long hpte_base_page_size(unsigned long h, unsigned long l)
{
	return __hpte_page_size(h, l, 1);
}

/*
 * The current system page and segment sizes
 */
extern int mmu_kernel_ssize;
extern int mmu_highuser_ssize;
extern u16 mmu_slb_size;
extern unsigned long tce_alloc_start, tce_alloc_end;

/*
 * If the processor supports 64k normal pages but not 64k cache
 * inhibited pages, we have to be prepared to switch processes
 * to use 4k pages when they create cache-inhibited mappings.
 * If this is the case, mmu_ci_restrictions will be set to 1.
 */
extern int mmu_ci_restrictions;

/*
 * This computes the AVPN and B fields of the first dword of a HPTE,
 * for use when we want to match an existing PTE. The bottom 7 bits
 * of the returned value are zero.
 */
static inline unsigned long hpte_encode_avpn(unsigned long vpn, int psize,
					     int ssize)
{
	unsigned long v;
	/*
	 * The AVA field omits the low-order 23 bits of the 78-bit VA.
	 * These bits are not needed in the PTE, because the
	 * low-order b of these bits are part of the byte offset
	 * into the virtual page and, if b < 23, the high-order
	 * 23-b of these bits are always used in selecting the
	 * PTEGs to be searched.
	 */
	v = (vpn >> (23 - VPN_SHIFT)) & ~(mmu_psize_defs[psize].avpnm);
	v <<= HPTE_V_AVPN_SHIFT;
	v |= ((unsigned long) ssize) << HPTE_V_SSIZE_SHIFT;
	return v;
}

/*
 * ISA v3.0 defines a new HPTE format, which differs from the old
 * format in having smaller AVPN and ARPN fields, and the B field
 * in the second dword instead of the first.
 */
static inline unsigned long hpte_old_to_new_v(unsigned long v)
{
	/* trim AVPN, drop B */
	return v & HPTE_V_COMMON_BITS;
}

static inline unsigned long hpte_old_to_new_r(unsigned long v, unsigned long r)
{
	/* move B field from 1st to 2nd dword, trim ARPN */
	return (r & ~HPTE_R_3_0_SSIZE_MASK) |
	       (((v) >> HPTE_V_SSIZE_SHIFT) << HPTE_R_3_0_SSIZE_SHIFT);
}

static inline unsigned long hpte_new_to_old_v(unsigned long v, unsigned long r)
{
	/* insert B field */
	return (v & HPTE_V_COMMON_BITS) |
	       ((r & HPTE_R_3_0_SSIZE_MASK) <<
		(HPTE_V_SSIZE_SHIFT - HPTE_R_3_0_SSIZE_SHIFT));
}

static inline unsigned long hpte_new_to_old_r(unsigned long r)
{
	/* clear out B field */
	return r & ~HPTE_R_3_0_SSIZE_MASK;
}
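
/*
 * A sketch of the conversion when writing an HPTE on ISA v3.0 hardware
 * (illustrative): hpte_old_to_new_r() consumes the old-format v, so r
 * must be converted before v is overwritten:
 *
 *	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
 *		hpte_r = hpte_old_to_new_r(hpte_v, hpte_r);
 *		hpte_v = hpte_old_to_new_v(hpte_v);
 *	}
 */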

static inline unsigned long hpte_get_old_v(struct hash_pte *hptep)
{
	unsigned long hpte_v;

	hpte_v = be64_to_cpu(hptep->v);
	if (cpu_has_feature(CPU_FTR_ARCH_300))
		hpte_v = hpte_new_to_old_v(hpte_v, be64_to_cpu(hptep->r));
	return hpte_v;
}

/*
 * This function sets the AVPN and L fields of the HPTE appropriately
 * using the base page size and actual page size.
 */
static inline unsigned long hpte_encode_v(unsigned long vpn, int base_psize,
					  int actual_psize, int ssize)
{
	unsigned long v;
	v = hpte_encode_avpn(vpn, base_psize, ssize);
	if (actual_psize != MMU_PAGE_4K)
		v |= HPTE_V_LARGE;
	return v;
}

/*
 * This function sets the ARPN and LP fields of the HPTE appropriately
 * for the page size. We assume the pa is already "clean", that is,
 * properly aligned for the requested page size.
 */
static inline unsigned long hpte_encode_r(unsigned long pa, int base_psize,
					  int actual_psize)
{
	/* A 4K page needs no special encoding */
	if (actual_psize == MMU_PAGE_4K)
		return pa & HPTE_R_RPN;
	else {
		unsigned int penc = mmu_psize_defs[base_psize].penc[actual_psize];
		unsigned int shift = mmu_psize_defs[actual_psize].shift;
		return (pa & ~((1ul << shift) - 1)) | (penc << LP_SHIFT);
	}
}

/*
 * Build a VPN_SHIFT-bit shifted VA given the VSID, EA and segment size.
 */
static inline unsigned long hpt_vpn(unsigned long ea,
				    unsigned long vsid, int ssize)
{
	unsigned long mask;
	int s_shift = segment_shift(ssize);

	mask = (1ul << (s_shift - VPN_SHIFT)) - 1;
	return (vsid << (s_shift - VPN_SHIFT)) | ((ea >> VPN_SHIFT) & mask);
}

/*
 * This hashes a virtual address
 */
static inline unsigned long hpt_hash(unsigned long vpn,
				     unsigned int shift, int ssize)
{
	unsigned long mask;
	unsigned long hash, vsid;

	/* VPN_SHIFT can be at most 12 */
	if (ssize == MMU_SEGSIZE_256M) {
		mask = (1ul << (SID_SHIFT - VPN_SHIFT)) - 1;
		hash = (vpn >> (SID_SHIFT - VPN_SHIFT)) ^
			((vpn & mask) >> (shift - VPN_SHIFT));
	} else {
		mask = (1ul << (SID_SHIFT_1T - VPN_SHIFT)) - 1;
		vsid = vpn >> (SID_SHIFT_1T - VPN_SHIFT);
		hash = vsid ^ (vsid << 25) ^
			((vpn & mask) >> (shift - VPN_SHIFT));
	}
	return hash & 0x7fffffffffUL;
}
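
/*
 * A sketch of typical use (matching the conventions of the hash MMU
 * code): the primary PTE group for a VPN is derived from the hash and
 * the secondary group from its complement:
 *
 *	hash      = hpt_hash(vpn, shift, ssize);
 *	primary   = ( hash & htab_hash_mask) * HPTES_PER_GROUP;
 *	secondary = (~hash & htab_hash_mask) * HPTES_PER_GROUP;
 */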

#define HPTE_LOCAL_UPDATE	0x1
#define HPTE_NOHPTE_UPDATE	0x2
#define HPTE_USE_KERNEL_KEY	0x4

long hpte_insert_repeating(unsigned long hash, unsigned long vpn, unsigned long pa,
			   unsigned long rflags, unsigned long vflags, int psize, int ssize);
extern int __hash_page_4K(unsigned long ea, unsigned long access,
			  unsigned long vsid, pte_t *ptep, unsigned long trap,
			  unsigned long flags, int ssize, int subpage_prot);
extern int __hash_page_64K(unsigned long ea, unsigned long access,
			   unsigned long vsid, pte_t *ptep, unsigned long trap,
			   unsigned long flags, int ssize);
struct mm_struct;
unsigned int hash_page_do_lazy_icache(unsigned int pp, pte_t pte, int trap);
extern int hash_page_mm(struct mm_struct *mm, unsigned long ea,
			unsigned long access, unsigned long trap,
			unsigned long flags);
extern int hash_page(unsigned long ea, unsigned long access, unsigned long trap,
		     unsigned long dsisr);
void low_hash_fault(struct pt_regs *regs, unsigned long address, int rc);
int __hash_page(unsigned long trap, unsigned long ea, unsigned long dsisr, unsigned long msr);
int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
		     pte_t *ptep, unsigned long trap, unsigned long flags,
		     int ssize, unsigned int shift, unsigned int mmu_psize);
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
extern int __hash_page_thp(unsigned long ea, unsigned long access,
			   unsigned long vsid, pmd_t *pmdp, unsigned long trap,
			   unsigned long flags, int ssize, unsigned int psize);
#else
static inline int __hash_page_thp(unsigned long ea, unsigned long access,
				  unsigned long vsid, pmd_t *pmdp,
				  unsigned long trap, unsigned long flags,
				  int ssize, unsigned int psize)
{
	BUG();
	return -1;
}
#endif
extern void hash_failure_debug(unsigned long ea, unsigned long access,
			       unsigned long vsid, unsigned long trap,
			       int ssize, int psize, int lpsize,
			       unsigned long pte);
extern int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
			     unsigned long pstart, unsigned long prot,
			     int psize, int ssize);
int htab_remove_mapping(unsigned long vstart, unsigned long vend,
			int psize, int ssize);
extern void pseries_add_gpage(u64 addr, u64 page_size, unsigned long number_of_pages);
extern void demote_segment_4k(struct mm_struct *mm, unsigned long addr);

extern void hash__setup_new_exec(void);

#ifdef CONFIG_PPC_PSERIES
void hpte_init_pseries(void);
#else
static inline void hpte_init_pseries(void) { }
#endif

extern void hpte_init_native(void);

struct slb_entry {
	u64	esid;
	u64	vsid;
};

extern void slb_initialize(void);
void slb_flush_and_restore_bolted(void);
void slb_flush_all_realmode(void);
void __slb_restore_bolted_realmode(void);
void slb_restore_bolted_realmode(void);
void slb_save_contents(struct slb_entry *slb_ptr);
void slb_dump_contents(struct slb_entry *slb_ptr);

extern void slb_vmalloc_update(void);
extern void slb_set_size(u16 size);
void preload_new_slb_context(unsigned long start, unsigned long sp);
#endif /* __ASSEMBLY__ */

/*
 * VSID allocation (256MB segment)
 *
 * We first generate a 37-bit "proto-VSID". Proto-VSIDs are generated
 * from the mmu context id and the effective segment id of the address.
 *
 * For user processes, the max context id is limited to MAX_USER_CONTEXT;
 * see get_user_context() for details.
 *
 * For kernel space, see get_kernel_context().
 *
 * The proto-VSIDs are then scrambled into real VSIDs with the
 * multiplicative hash:
 *
 *	VSID = (proto-VSID * VSID_MULTIPLIER) % VSID_MODULUS
 *
 * VSID_MULTIPLIER is prime, so in particular it is
 * co-prime to VSID_MODULUS, making this a 1:1 scrambling function.
 * Because the modulus is 2^n-1 we can compute it efficiently without
 * a divide or extra multiply (see below). The scramble function gives
 * robust scattering in the hash table (at least based on some initial
 * results).
 *
 * We use VSID 0 to indicate an invalid VSID. That means we can't use
 * context id 0, because a context id of 0 and an EA of 0 gives a
 * proto-VSID of 0, which will produce a VSID of 0.
 *
 * We also need to avoid the last segment of the last context, because
 * that would give a proto-VSID of 0x1fffffffff, which will result in a
 * VSID of 0 because of the modulo operation in the vsid scramble.
 */
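
/*
 * A worked example of the 2^n - 1 modulus trick used by vsid_scramble()
 * below (illustrative numbers only): with n = 4, modulus = 15, x = 37:
 *
 *	(x >> 4) + (x & 15) = 2 + 5 = 7 = 37 % 15
 *
 * A second fold handles the case where this sum can still reach the
 * modulus.
 */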

/*
 * Max VA bits we support as of now is 68 bits. We want a 19 bit
 * context ID.
 * Restrictions:
 * GPU has restrictions of not being able to access beyond 128TB
 * (47 bit effective address). We also cannot do more than a 20 bit PID.
 * For p4 and p5, which can only do a 65 bit VA, we restrict our
 * CONTEXT_BITS to 16 bits (ie, we can only have 2^16 pids at the same
 * time).
 */
#define VA_BITS			68
#define CONTEXT_BITS		19
#define ESID_BITS		(VA_BITS - (SID_SHIFT + CONTEXT_BITS))
#define ESID_BITS_1T		(VA_BITS - (SID_SHIFT_1T + CONTEXT_BITS))

#define ESID_BITS_MASK		((1 << ESID_BITS) - 1)
#define ESID_BITS_1T_MASK	((1 << ESID_BITS_1T) - 1)

/*
 * Certain configs now support MAX_PHYSMEM of more than 512TB. Hence we
 * will need to use more than one context for linearly mapping the kernel.
 * For vmalloc and memmap, we use just one context with 512TB. With a
 * 64 byte struct page size, we need only 32TB of memmap for 2PB
 * (51 bits (MAX_PHYSMEM_BITS)).
 */
#if (H_MAX_PHYSMEM_BITS > MAX_EA_BITS_PER_CONTEXT)
#define MAX_KERNEL_CTX_CNT	(1UL << (H_MAX_PHYSMEM_BITS - MAX_EA_BITS_PER_CONTEXT))
#else
#define MAX_KERNEL_CTX_CNT	1
#endif

#define MAX_VMALLOC_CTX_CNT	1
#define MAX_IO_CTX_CNT		1
#define MAX_VMEMMAP_CTX_CNT	1

/*
 * 256MB segment
 * The proto-VSID space has 2^(CONTEXT_BITS + ESID_BITS) - 1 segments
 * available for user + kernel mapping. VSID 0 is reserved as invalid,
 * contexts 1-4 are used for kernel mapping. Each segment contains 2^28
 * bytes. Each context maps 2^49 bytes (512TB).
 *
 * We also need to avoid the last segment of the last context, because
 * that would give a proto-VSID of 0x1fffffffff, which will result in a
 * VSID of 0 because of the modulo operation in the vsid scramble.
 */
#define MAX_USER_CONTEXT	((ASM_CONST(1) << CONTEXT_BITS) - 2)

// The + 2 accounts for INVALID_REGION and 1 more to avoid overlap with kernel
#define MIN_USER_CONTEXT	(MAX_KERNEL_CTX_CNT + MAX_VMALLOC_CTX_CNT + \
				 MAX_IO_CTX_CNT + MAX_VMEMMAP_CTX_CNT + 2)

/*
 * For platforms that support only a 65 bit VA, we limit the context bits.
 */
#define MAX_USER_CONTEXT_65BIT_VA ((ASM_CONST(1) << (65 - (SID_SHIFT + ESID_BITS))) - 2)

/*
 * This should be computed such that protovsid * vsid_multiplier
 * doesn't overflow 64 bits. The vsid_multiplier should also be
 * co-prime to vsid_modulus. We also need to make sure that the number
 * of bits in the multiplied result (dividend) is less than twice the
 * number of protovsid bits for our modulus optimization to work.
 *
 * The below table shows the current values used.
 * |-------+------------+----------------------+------------+---------------------|
 * |       | Prime Bits | proto VSID_BITS_65VA | Total Bits | 2 * proto VSID_BITS |
 * |-------+------------+----------------------+------------+---------------------|
 * | 1T    |         24 |                   25 |         49 |                  50 |
 * |-------+------------+----------------------+------------+---------------------|
 * | 256MB |         24 |                   37 |         61 |                  74 |
 * |-------+------------+----------------------+------------+---------------------|
 *
 * |-------+------------+----------------------+------------+---------------------|
 * |       | Prime Bits | proto VSID_BITS_68VA | Total Bits | 2 * proto VSID_BITS |
 * |-------+------------+----------------------+------------+---------------------|
 * | 1T    |         24 |                   28 |         52 |                  56 |
 * |-------+------------+----------------------+------------+---------------------|
 * | 256MB |         24 |                   40 |         64 |                  80 |
 * |-------+------------+----------------------+------------+---------------------|
 *
 */
#define VSID_MULTIPLIER_256M	ASM_CONST(12538073)	/* 24-bit prime */
#define VSID_BITS_256M		(VA_BITS - SID_SHIFT)
#define VSID_BITS_65_256M	(65 - SID_SHIFT)
/*
 * Modular multiplicative inverse of VSID_MULTIPLIER under modulo VSID_MODULUS
 */
#define VSID_MULINV_256M	ASM_CONST(665548017062)

#define VSID_MULTIPLIER_1T	ASM_CONST(12538073)	/* 24-bit prime */
#define VSID_BITS_1T		(VA_BITS - SID_SHIFT_1T)
#define VSID_BITS_65_1T		(65 - SID_SHIFT_1T)
#define VSID_MULINV_1T		ASM_CONST(209034062)

/* 1TB VSID reserved for VRMA */
#define VRMA_VSID	0x1ffffffUL
#define USER_VSID_RANGE	(1UL << (ESID_BITS + SID_SHIFT))

/* 4 bits per slice and we have one slice per 1TB */
#define SLICE_ARRAY_SIZE	(H_PGTABLE_RANGE >> 41)
#define LOW_SLICE_ARRAY_SZ	(BITS_PER_LONG / BITS_PER_BYTE)
#define TASK_SLICE_ARRAY_SZ(x)	((x)->hash_context->slb_addr_limit >> 41)
#ifndef __ASSEMBLY__

#ifdef CONFIG_PPC_SUBPAGE_PROT
/*
 * For the sub-page protection option, we extend the PGD with one of
 * these. Basically we have a 3-level tree, with the top level being
 * the protptrs array. To optimize speed and memory consumption when
 * only addresses < 4GB are being protected, pointers to the first
 * four pages of sub-page protection words are stored in the low_prot
 * array.
 * Each page of sub-page protection words protects 1GB (4 bytes
 * protects 64k). For the 3-level tree, each page of pointers then
 * protects 8TB.
 */
struct subpage_prot_table {
	unsigned long maxaddr;	/* only addresses < this are protected */
	unsigned int **protptrs[(TASK_SIZE_USER64 >> 43)];
	unsigned int *low_prot[4];
};

#define SBP_L1_BITS	(PAGE_SHIFT - 2)
#define SBP_L2_BITS	(PAGE_SHIFT - 3)
#define SBP_L1_COUNT	(1 << SBP_L1_BITS)
#define SBP_L2_COUNT	(1 << SBP_L2_BITS)
#define SBP_L2_SHIFT	(PAGE_SHIFT + SBP_L1_BITS)
#define SBP_L3_SHIFT	(SBP_L2_SHIFT + SBP_L2_BITS)
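
/*
 * A minimal sketch of the 3-level lookup (assuming a populated table;
 * NULL checks omitted). Addresses below 4GB index low_prot directly:
 *
 *	unsigned int **sbpm, *sbpp, spp;
 *
 *	sbpm = spt->protptrs[ea >> SBP_L3_SHIFT];
 *	sbpp = sbpm[(ea >> SBP_L2_SHIFT) & (SBP_L2_COUNT - 1)];
 *	spp  = sbpp[(ea >> PAGE_SHIFT) & (SBP_L1_COUNT - 1)];
 */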

extern void subpage_prot_free(struct mm_struct *mm);
#else
static inline void subpage_prot_free(struct mm_struct *mm) {}
#endif /* CONFIG_PPC_SUBPAGE_PROT */

/*
 * One bit per slice. We have lower slices which cover 256MB segments
 * up to the 4GB range. That gets us 16 low slices. For the rest we
 * track slices in 1TB size.
 */
struct slice_mask {
	u64 low_slices;
	DECLARE_BITMAP(high_slices, SLICE_NUM_HIGH);
};

struct hash_mm_context {
	u16 user_psize; /* page size index */

	/* SLB page size encodings */
	unsigned char low_slices_psize[LOW_SLICE_ARRAY_SZ];
	unsigned char high_slices_psize[SLICE_ARRAY_SIZE];
	unsigned long slb_addr_limit;
#ifdef CONFIG_PPC_64K_PAGES
	struct slice_mask mask_64k;
#endif
	struct slice_mask mask_4k;
#ifdef CONFIG_HUGETLB_PAGE
	struct slice_mask mask_16m;
	struct slice_mask mask_16g;
#endif

#ifdef CONFIG_PPC_SUBPAGE_PROT
	struct subpage_prot_table *spt;
#endif /* CONFIG_PPC_SUBPAGE_PROT */
};

#if 0
/*
 * The code below is equivalent to this function for arguments
 * < 2^VSID_BITS, which is all this should ever be called
 * with. However gcc is not clever enough to compute the
 * modulus (2^n-1) without a second multiply.
 */
#define vsid_scramble(protovsid, size) \
	((((protovsid) * VSID_MULTIPLIER_##size) % VSID_MODULUS_##size))

/* simplified form avoiding mod operation */
#define vsid_scramble(protovsid, size) \
	({								 \
		unsigned long x;					 \
		x = (protovsid) * VSID_MULTIPLIER_##size;		 \
		x = (x >> VSID_BITS_##size) + (x & VSID_MODULUS_##size); \
		(x + ((x+1) >> VSID_BITS_##size)) & VSID_MODULUS_##size; \
	})

#else /* 1 */
static inline unsigned long vsid_scramble(unsigned long protovsid,
				unsigned long vsid_multiplier, int vsid_bits)
{
	unsigned long vsid;
	unsigned long vsid_modulus = ((1UL << vsid_bits) - 1);
	/*
	 * We have the same multiplier for both 256M and 1T segments now
	 */
	vsid = protovsid * vsid_multiplier;
	vsid = (vsid >> vsid_bits) + (vsid & vsid_modulus);
	return (vsid + ((vsid + 1) >> vsid_bits)) & vsid_modulus;
}

#endif /* 1 */

/* Returns the segment size indicator for a user address */
static inline int user_segment_size(unsigned long addr)
{
	/* Use 1T segments if possible for addresses >= 1T */
	if (addr >= (1UL << SID_SHIFT_1T))
		return mmu_highuser_ssize;
	return MMU_SEGSIZE_256M;
}

static inline unsigned long get_vsid(unsigned long context, unsigned long ea,
				     int ssize)
{
	unsigned long va_bits = VA_BITS;
	unsigned long vsid_bits;
	unsigned long protovsid;

	/*
	 * Bad address. We return VSID 0 for that.
	 */
	if ((ea & EA_MASK) >= H_PGTABLE_RANGE)
		return 0;

	if (!mmu_has_feature(MMU_FTR_68_BIT_VA))
		va_bits = 65;

	if (ssize == MMU_SEGSIZE_256M) {
		vsid_bits = va_bits - SID_SHIFT;
		protovsid = (context << ESID_BITS) |
			((ea >> SID_SHIFT) & ESID_BITS_MASK);
		return vsid_scramble(protovsid, VSID_MULTIPLIER_256M, vsid_bits);
	}
	/* 1T segment */
	vsid_bits = va_bits - SID_SHIFT_1T;
	protovsid = (context << ESID_BITS_1T) |
		((ea >> SID_SHIFT_1T) & ESID_BITS_1T_MASK);
	return vsid_scramble(protovsid, VSID_MULTIPLIER_1T, vsid_bits);
}

/*
 * For kernel space, we use context ids as
 * below. Range is 512TB per context.
 *
 * 0x00001 -  [ 0xc000000000000000 - 0xc001ffffffffffff]
 * 0x00002 -  [ 0xc002000000000000 - 0xc003ffffffffffff]
 * 0x00003 -  [ 0xc004000000000000 - 0xc005ffffffffffff]
 * 0x00004 -  [ 0xc006000000000000 - 0xc007ffffffffffff]
 *
 * vmalloc, IO, vmemmap
 *
 * 0x00005 -  [ 0xc008000000000000 - 0xc009ffffffffffff]
 * 0x00006 -  [ 0xc00a000000000000 - 0xc00bffffffffffff]
 * 0x00007 -  [ 0xc00c000000000000 - 0xc00dffffffffffff]
 *
 */
static inline unsigned long get_kernel_context(unsigned long ea)
{
	unsigned long region_id = get_region_id(ea);
	unsigned long ctx;
	/*
	 * Depending on the kernel config, the kernel region can have one
	 * context or more.
	 */
	if (region_id == LINEAR_MAP_REGION_ID) {
		/*
		 * We already verified ea to be not beyond the addr limit.
		 */
		ctx = 1 + ((ea & EA_MASK) >> MAX_EA_BITS_PER_CONTEXT);
	} else
		ctx = region_id + MAX_KERNEL_CTX_CNT - 1;
	return ctx;
}

/*
 * This is only valid for addresses >= PAGE_OFFSET
 */
static inline unsigned long get_kernel_vsid(unsigned long ea, int ssize)
{
	unsigned long context;

	if (!is_kernel_addr(ea))
		return 0;

	context = get_kernel_context(ea);
	return get_vsid(context, ea, ssize);
}

unsigned htab_shift_for_mem_size(unsigned long mem_size);

enum slb_index {
	LINEAR_INDEX	= 0, /* Kernel linear map (0xc000000000000000) */
	KSTACK_INDEX	= 1, /* Kernel stack map */
};

#define slb_esid_mask(ssize)	\
	(((ssize) == MMU_SEGSIZE_256M) ? ESID_MASK : ESID_MASK_1T)

static inline unsigned long mk_esid_data(unsigned long ea, int ssize,
					 enum slb_index index)
{
	return (ea & slb_esid_mask(ssize)) | SLB_ESID_V | index;
}

static inline unsigned long __mk_vsid_data(unsigned long vsid, int ssize,
					   unsigned long flags)
{
	return (vsid << slb_vsid_shift(ssize)) | flags |
		((unsigned long)ssize << SLB_VSID_SSIZE_SHIFT);
}

static inline unsigned long mk_vsid_data(unsigned long ea, int ssize,
					 unsigned long flags)
{
	return __mk_vsid_data(get_kernel_vsid(ea, ssize), ssize, flags);
}
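
/*
 * Illustration (a sketch of how the SLB setup code uses these): a bolted
 * kernel SLB entry for an address in the linear mapping can be written as
 *
 *	asm volatile("slbmte %0,%1" : :
 *		     "r" (mk_vsid_data(ea, mmu_kernel_ssize,
 *				       SLB_VSID_KERNEL | llp)),
 *		     "r" (mk_esid_data(ea, mmu_kernel_ssize, LINEAR_INDEX))
 *		     : "memory");
 *
 * where llp is the SLB_VSID_L|LP encoding for the linear-map page size.
 */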

#endif /* __ASSEMBLY__ */
#endif /* _ASM_POWERPC_BOOK3S_64_MMU_HASH_H_ */