/* SPDX-License-Identifier: GPL-2.0 */
/*
 *  S390 version
 *    Copyright IBM Corp. 1999, 2000
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 */

#ifndef _S390_PAGE_H
#define _S390_PAGE_H

#include <linux/const.h>
#include <asm/types.h>

#define _PAGE_SHIFT	12
#define _PAGE_SIZE	(_AC(1, UL) << _PAGE_SHIFT)
#define _PAGE_MASK	(~(_PAGE_SIZE - 1))

/* PAGE_SHIFT determines the page size */
#define PAGE_SHIFT	_PAGE_SHIFT
#define PAGE_SIZE	_PAGE_SIZE
#define PAGE_MASK	_PAGE_MASK
#define PAGE_DEFAULT_ACC	0
#define PAGE_DEFAULT_KEY	(PAGE_DEFAULT_ACC << 4)
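/*
 * The access-control bits occupy bits 0-3 of the storage key, i.e. the
 * upper nibble of the key byte (see _PAGE_ACC_BITS below), hence the
 * shift by 4 when forming PAGE_DEFAULT_KEY from PAGE_DEFAULT_ACC.
 */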

#define HPAGE_SHIFT	20
#define HPAGE_SIZE	(1UL << HPAGE_SHIFT)
#define HPAGE_MASK	(~(HPAGE_SIZE - 1))
#define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)
#define HUGE_MAX_HSTATE		2
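/*
 * HPAGE_SHIFT of 20 selects 1 MB huge pages, mapped at segment (pmd)
 * level. Two hstates are provided so that 2 GB (region third/pud level)
 * pages can be offered as a second huge page size.
 */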

#define ARCH_HAS_SETCLEAR_HUGE_PTE
#define ARCH_HAS_HUGE_PTE_TYPE
#define ARCH_HAS_PREPARE_HUGEPAGE
#define ARCH_HAS_HUGEPAGE_CLEAR_FLUSH

#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA

#include <asm/setup.h>
#ifndef __ASSEMBLY__

void __storage_key_init_range(unsigned long start, unsigned long end);

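/*
 * PAGE_DEFAULT_KEY evaluates to 0, so the constant check below is false
 * at compile time and the call to __storage_key_init_range() is
 * optimized away entirely.
 */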
static inline void storage_key_init_range(unsigned long start, unsigned long end)
{
	if (PAGE_DEFAULT_KEY != 0)
		__storage_key_init_range(start, end);
}

#define clear_page(page)	memset((page), 0, PAGE_SIZE)

/*
 * copy_page() uses the mvcl instruction with the 0xb0 padding byte in
 * order to bypass the caches when copying a page. Especially when
 * copying huge pages this keeps the L1 and L2 data caches alive.
 */
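/*
 * Operand encoding of mvcl: the even register of each pair holds the
 * operand address, the low 24 bits of the odd register hold the operand
 * length, and bits 32-39 of the source odd register hold the padding
 * byte. The values 0x1000 and 0xb0001000 below thus encode a length of
 * one 4 KB page plus the 0xb0 pad byte for the source operand.
 */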
static inline void copy_page(void *to, void *from)
{
	union register_pair dst, src;

	dst.even = (unsigned long) to;
	dst.odd  = 0x1000;
	src.even = (unsigned long) from;
	src.odd  = 0xb0001000;

	asm volatile(
		"	mvcl	%[dst],%[src]"
		: [dst] "+&d" (dst.pair), [src] "+&d" (src.pair)
		: : "memory", "cc");
}

#define clear_user_page(page, vaddr, pg)	clear_page(page)
#define copy_user_page(to, from, vaddr, pg)	copy_page(to, from)

#define alloc_zeroed_user_highpage_movable(vma, vaddr) \
	alloc_page_vma(GFP_HIGHUSER_MOVABLE | __GFP_ZERO, vma, vaddr)
#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE_MOVABLE

/*
 * These struct wrappers give the page table entry types the benefit of
 * C type-checking: the different levels cannot be mixed by accident.
 */

typedef struct { unsigned long pgprot; } pgprot_t;
typedef struct { unsigned long pgste; } pgste_t;
typedef struct { unsigned long pte; } pte_t;
typedef struct { unsigned long pmd; } pmd_t;
typedef struct { unsigned long pud; } pud_t;
typedef struct { unsigned long p4d; } p4d_t;
typedef struct { unsigned long pgd; } pgd_t;
typedef pte_t *pgtable_t;
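/*
 * Note: s390 allocates page tables as 2 KB fragments, two per 4 KB page,
 * so a pgtable_t is a pointer to the pte fragment rather than a
 * struct page pointer.
 */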

#define pgprot_val(x)	((x).pgprot)
#define pgste_val(x)	((x).pgste)
#define pte_val(x)	((x).pte)
#define pmd_val(x)	((x).pmd)
#define pud_val(x)	((x).pud)
#define p4d_val(x)	((x).p4d)
#define pgd_val(x)	((x).pgd)

#define __pgste(x)	((pgste_t) { (x) } )
#define __pte(x)	((pte_t) { (x) } )
#define __pmd(x)	((pmd_t) { (x) } )
#define __pud(x)	((pud_t) { (x) } )
#define __p4d(x)	((p4d_t) { (x) } )
#define __pgd(x)	((pgd_t) { (x) } )
#define __pgprot(x)	((pgprot_t) { (x) } )
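/*
 * Example (illustrative only): conversions must go through the
 * constructors and accessors above,
 *
 *	pte_t pte = __pte(0);
 *	unsigned long v = pte_val(pte);
 *
 * while assigning an unsigned long to a pte_t directly is rejected by
 * the compiler.
 */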

static inline void page_set_storage_key(unsigned long addr,
					unsigned char skey, int mapped)
{
	if (!mapped)
		asm volatile(".insn rrf,0xb22b0000,%0,%1,8,0"
			     : : "d" (skey), "a" (addr));
	else
		asm volatile("sske %0,%1" : : "d" (skey), "a" (addr));
}
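/*
 * The .insn alternative used above encodes sske (opcode 0xb22b) with the
 * nonquiescing (NQ) control set in the M3 field, avoiding the
 * system-wide quiescing operation when the page is not mapped anywhere.
 */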

static inline unsigned char page_get_storage_key(unsigned long addr)
{
	unsigned char skey;

	asm volatile("iske %0,%1" : "=d" (skey) : "a" (addr));
	return skey;
}

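/*
 * page_reset_referenced() clears the reference bit with rrbe and returns
 * the resulting condition code: bit 1 of the value is the old reference
 * bit, bit 0 the old change bit. ipm/srl extract the cc from the PSW.
 */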
static inline int page_reset_referenced(unsigned long addr)
{
	int cc;

	asm volatile(
		"	rrbe	0,%1\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (cc) : "a" (addr) : "cc");
	return cc;
}

/* Bits in the storage key */
#define _PAGE_CHANGED		0x02	/* HW changed bit		*/
#define _PAGE_REFERENCED	0x04	/* HW referenced bit		*/
#define _PAGE_FP_BIT		0x08	/* HW fetch protection bit	*/
#define _PAGE_ACC_BITS		0xf0	/* HW access control bits	*/

struct page;
void arch_free_page(struct page *page, int order);
void arch_alloc_page(struct page *page, int order);
void arch_set_page_dat(struct page *page, int order);

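/* Access to /dev/mem is never allowed, regardless of the pfn. */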
static inline int devmem_is_allowed(unsigned long pfn)
{
	return 0;
}

#define HAVE_ARCH_FREE_PAGE
#define HAVE_ARCH_ALLOC_PAGE

#if IS_ENABLED(CONFIG_PGSTE)
int arch_make_page_accessible(struct page *page);
#define HAVE_ARCH_MAKE_PAGE_ACCESSIBLE
#endif

#endif /* !__ASSEMBLY__ */

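/*
 * The kernel runs with a 1:1 mapping of virtual to physical addresses,
 * hence PAGE_OFFSET is 0 and __pa()/__va() are plain casts.
 */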
#define __PAGE_OFFSET		0x0UL
#define PAGE_OFFSET		0x0UL

#define __pa(x)			((unsigned long)(x))
#define __va(x)			((void *)(unsigned long)(x))

#define phys_to_pfn(phys)	((phys) >> PAGE_SHIFT)
#define pfn_to_phys(pfn)	((pfn) << PAGE_SHIFT)

#define phys_to_page(phys)	pfn_to_page(phys_to_pfn(phys))
#define page_to_phys(page)	pfn_to_phys(page_to_pfn(page))

#define pfn_to_virt(pfn)	__va(pfn_to_phys(pfn))
#define virt_to_pfn(kaddr)	(phys_to_pfn(__pa(kaddr)))
#define pfn_to_kaddr(pfn)	pfn_to_virt(pfn)

#define virt_to_page(kaddr)	pfn_to_page(virt_to_pfn(kaddr))
#define page_to_virt(page)	pfn_to_virt(page_to_pfn(page))

#define virt_addr_valid(kaddr)	pfn_valid(virt_to_pfn(kaddr))

#define VM_DATA_DEFAULT_FLAGS	VM_DATA_FLAGS_NON_EXEC

#include <asm-generic/memory_model.h>
#include <asm-generic/getorder.h>

#endif /* _S390_PAGE_H */