/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_BOOK3S_32_MMU_HASH_H_
#define _ASM_POWERPC_BOOK3S_32_MMU_HASH_H_

/*
 * 32-bit hash table MMU support
 */

/*
 * BATs
 */

/* Block size masks */
#define BL_128K	0x000
#define BL_256K	0x001
#define BL_512K	0x003
#define BL_1M	0x007
#define BL_2M	0x00F
#define BL_4M	0x01F
#define BL_8M	0x03F
#define BL_16M	0x07F
#define BL_32M	0x0FF
#define BL_64M	0x1FF
#define BL_128M	0x3FF
#define BL_256M	0x7FF
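
/*
 * The BL values are "mask of ones" encodings: for a block of size bytes
 * (a power of two, >= 128K), BL = (size >> 17) - 1, e.g.
 * (256M >> 17) - 1 = 0x7FF.
 */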

/* BAT Access Protection */
#define BPP_XX	0x00		/* No access */
#define BPP_RX	0x01		/* Read only */
#define BPP_RW	0x02		/* Read/write */

#ifndef __ASSEMBLY__
/* Contort a phys_addr_t into the right format/bits for a BAT */
#ifdef CONFIG_PHYS_64BIT
#define BAT_PHYS_ADDR(x) ((u32)(((x) & 0x00000000fffe0000ULL) | \
				(((x) & 0x0000000e00000000ULL) >> 24) | \
				(((x) & 0x0000000100000000ULL) >> 30)))
#define PHYS_BAT_ADDR(x) (((u64)(x) & 0x00000000fffe0000ULL) | \
			  (((u64)(x) << 24) & 0x0000000e00000000ULL) | \
			  (((u64)(x) << 30) & 0x0000000100000000ULL))
#else
#define BAT_PHYS_ADDR(x) (x)
#define PHYS_BAT_ADDR(x) ((x) & 0xfffe0000)
#endif
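
/*
 * Illustrative example (not kernel code): with CONFIG_PHYS_64BIT, a
 * 36-bit physical address such as 0x2_3000_0000 is folded into a 32-bit
 * BATL value as
 *
 *	BAT_PHYS_ADDR(0x230000000ULL)
 *		== 0x30000000		   (address bits 17-31, the BRPN)
 *		 | (0x200000000ULL >> 24)  (address bits 33-35)
 *		 | 0			   (address bit 32)
 *		== 0x30000200
 *
 * i.e. the high-order address bits are packed into the low-order bits of
 * the BAT, which is where CPUs with extended BAT addressing expect them.
 */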

struct ppc_bat {
	u32 batu;
	u32 batl;
};
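
/*
 * Minimal sketch (not a kernel API) of how the definitions above combine
 * into a BAT pair mapping 256MB at 0xc0000000, cacheable, supervisor
 * read/write; the real setup is done by setbat()/update_bats():
 *
 *	struct ppc_bat bat = {
 *		.batu = 0xc0000000 | (BL_256M << 2) | 2,   // BEPI | BL | Vs
 *		.batl = BAT_PHYS_ADDR(0xc0000000ULL) | BPP_RW,
 *	};
 */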
#endif /* !__ASSEMBLY__ */

/*
 * Hash table
 */

/* Values for PP (assumes Ks=0, Kp=1) */
#define PP_RWXX	0	/* Supervisor read/write, User none */
#define PP_RWRX	1	/* Supervisor read/write, User read */
#define PP_RWRW	2	/* Supervisor read/write, User read/write */
#define PP_RXRX	3	/* Supervisor read, User read */

/* Values for Segment Registers */
#define SR_NX	0x10000000	/* No Execute */
#define SR_KP	0x20000000	/* User key */
#define SR_KS	0x40000000	/* Supervisor key */

#ifndef __ASSEMBLY__

/*
 * This macro defines the mapping from contexts to VSIDs (virtual
 * segment IDs). We use a skew on both the context and the high 4 bits
 * of the 32-bit virtual address (the "effective segment ID") in order
 * to spread out the entries in the MMU hash table. Note that if this
 * mapping is changed, the low-level hash functions must be changed to
 * match.
 */
#define CTX_TO_VSID(c, id)	((((c) * (897 * 16)) + ((id) * 0x111)) & 0xffffff)
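
/*
 * For example (illustrative only), the VSID covering user address 'ea'
 * in context 'ctx' is
 *
 *	vsid = CTX_TO_VSID(ctx, ea >> 28);
 *
 * The per-segment skew of 0x111 is the same one applied by
 * update_user_segment() below.
 */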

/*
 * Hardware Page Table Entry
 * Note that the xpn and x bitfields are used only by processors that
 * support extended addressing; otherwise, those bits are reserved.
 */
struct hash_pte {
	unsigned long v:1;	/* Entry is valid */
	unsigned long vsid:24;	/* Virtual segment identifier */
	unsigned long h:1;	/* Hash algorithm indicator */
	unsigned long api:6;	/* Abbreviated page index */
	unsigned long rpn:20;	/* Real (physical) page number */
	unsigned long xpn:3;	/* Real page number bits 0-2, optional */
	unsigned long r:1;	/* Referenced */
	unsigned long c:1;	/* Changed */
	unsigned long w:1;	/* Write-thru cache mode */
	unsigned long i:1;	/* Cache inhibited */
	unsigned long m:1;	/* Memory coherence */
	unsigned long g:1;	/* Guarded */
	unsigned long x:1;	/* Real page number bit 3, optional */
	unsigned long pp:2;	/* Page protection */
};
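
/*
 * Rough sketch of how an entry is located (the authoritative version is
 * the low-level hash assembly, hash_low.S): the primary hash is the low
 * 19 bits of the VSID XORed with the 16-bit page index, and "api" holds
 * the top 6 bits of that page index:
 *
 *	u32 pi   = (ea >> 12) & 0xffff;		// page index
 *	u32 hash = (vsid & 0x7ffff) ^ pi;	// primary hash (h = 0)
 *	u32 api  = pi >> 10;			// abbreviated page index
 *
 * The secondary hash (h = 1) uses the one's complement of the primary
 * hash.
 */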

typedef struct {
	unsigned long id;
	void __user *vdso;
} mm_context_t;

void update_bats(void);
static inline void cleanup_cpu_mmu_context(void) { }

/* patch sites */
extern s32 patch__hash_page_A0, patch__hash_page_A1, patch__hash_page_A2;
extern s32 patch__hash_page_B, patch__hash_page_C;
extern s32 patch__flush_hash_A0, patch__flush_hash_A1, patch__flush_hash_A2;
extern s32 patch__flush_hash_B;

#include <asm/reg.h>
#include <asm/task_size_32.h>

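/*
 * Reload one user segment register.  Segments whose base address is at
 * or above TASK_SIZE belong to the kernel and are left untouched; the
 * 0x111 per-segment skew matches the one in CTX_TO_VSID() above.
 */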
static __always_inline void update_user_segment(u32 n, u32 val)
{
	if (n << 28 < TASK_SIZE)
		mtsr(val + n * 0x111, n << 28);
}

static __always_inline void update_user_segments(u32 val)
{
	val &= 0xf0ffffff;

	update_user_segment(0, val);
	update_user_segment(1, val);
	update_user_segment(2, val);
	update_user_segment(3, val);
	update_user_segment(4, val);
	update_user_segment(5, val);
	update_user_segment(6, val);
	update_user_segment(7, val);
	update_user_segment(8, val);
	update_user_segment(9, val);
	update_user_segment(10, val);
	update_user_segment(11, val);
	update_user_segment(12, val);
	update_user_segment(13, val);
	update_user_segment(14, val);
	update_user_segment(15, val);
}
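
/*
 * update_user_segments() reloads every segment register that covers user
 * addresses with a common value.  For instance (illustrative sketch; the
 * book3s/32 kernel user-execution protection does something along these
 * lines):
 *
 *	update_user_segments(mfsr(0) | SR_NX);	// forbid user execution
 *	update_user_segments(mfsr(0) & ~SR_NX);	// allow it again
 */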

#endif /* !__ASSEMBLY__ */

/*
 * We happily ignore the smaller BATs on 601; we don't actually use
 * those definitions on hash32 at the moment anyway.
 */
#define mmu_virtual_psize	MMU_PAGE_4K
#define mmu_linear_psize	MMU_PAGE_256M

#endif /* _ASM_POWERPC_BOOK3S_32_MMU_HASH_H_ */