// SPDX-License-Identifier: GPL-2.0

/*
 * MMU-generic set_memory implementation for powerpc
 *
 * Copyright 2019-2021, IBM Corporation.
 */

#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/set_memory.h>

#include <asm/mmu.h>
#include <asm/page.h>
#include <asm/pgtable.h>


/*
 * Updates the attributes of a page in three steps:
 *
 * 1. take the page_table_lock
 * 2. install the new entry with the updated attributes
 * 3. flush the TLB
 *
 * This sequence is safe against concurrent updates, and also allows updating the
 * attributes of a page currently being executed or accessed.
 */
static int change_page_attr(pte_t *ptep, unsigned long addr, void *data)
{
	long action = (long)data;
	pte_t pte;

	spin_lock(&init_mm.page_table_lock);

	pte = ptep_get(ptep);

	/* modify the PTE bits as desired, then apply */
	switch (action) {
	case SET_MEMORY_RO:
		pte = pte_wrprotect(pte);
		break;
	case SET_MEMORY_RW:
		pte = pte_mkwrite(pte_mkdirty(pte));
		break;
	case SET_MEMORY_NX:
		pte = pte_exprotect(pte);
		break;
	case SET_MEMORY_X:
		pte = pte_mkexec(pte);
		break;
	default:
		WARN_ON_ONCE(1);
		break;
	}

	pte_update(&init_mm, addr, ptep, ~0UL, pte_val(pte), 0);

	/* See ptesync comment in radix__set_pte_at() */
	if (radix_enabled())
		asm volatile("ptesync": : :"memory");

	flush_tlb_kernel_range(addr, addr + PAGE_SIZE);

	spin_unlock(&init_mm.page_table_lock);

	return 0;
}

int change_memory_attr(unsigned long addr, int numpages, long action)
{
	unsigned long start = ALIGN_DOWN(addr, PAGE_SIZE);
	unsigned long size = numpages * PAGE_SIZE;

	if (!numpages)
		return 0;

	if (WARN_ON_ONCE(is_vmalloc_or_module_addr((void *)addr) &&
			 is_vm_area_hugepages((void *)addr)))
		return -EINVAL;

#ifdef CONFIG_PPC_BOOK3S_64
	/*
	 * On hash, the linear mapping is not in the Linux page table so
	 * apply_to_existing_page_range() will have no effect. If in the future
	 * the set_memory_* functions are used on the linear map this will need
	 * to be updated.
	 */
	if (!radix_enabled()) {
		int region = get_region_id(addr);

		if (WARN_ON_ONCE(region != VMALLOC_REGION_ID && region != IO_REGION_ID))
			return -EINVAL;
	}
#endif

	return apply_to_existing_page_range(&init_mm, start, size,
					    change_page_attr, (void *)action);
}
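
/*
 * Illustrative sketch, not part of the original file: callers normally reach
 * change_memory_attr() through thin set_memory_*() wrappers declared in
 * asm/set_memory.h. The hypothetical example_mark_ro() below mirrors that
 * wrapper pattern purely as a usage example; only change_memory_attr() and
 * the SET_MEMORY_RO action are defined above or in the included headers.
 */
static inline int example_mark_ro(unsigned long addr, int numpages)
{
	/* write-protect numpages pages starting at addr */
	return change_memory_attr(addr, numpages, SET_MEMORY_RO);
}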

/*
 * Set the attributes of a page:
 *
 * This function is used by PPC32 at the end of init to set final kernel memory
 * protection. It includes changing the mapping of the page it is executing from
 * and of the data pages it is using.
 */
static int set_page_attr(pte_t *ptep, unsigned long addr, void *data)
{
	pgprot_t prot = __pgprot((unsigned long)data);

	spin_lock(&init_mm.page_table_lock);

	/* install the PTE with the new protection bits, then flush the stale translation */
	set_pte_at(&init_mm, addr, ptep, pte_modify(*ptep, prot));
	flush_tlb_kernel_range(addr, addr + PAGE_SIZE);

	spin_unlock(&init_mm.page_table_lock);

	return 0;
}

int set_memory_attr(unsigned long addr, int numpages, pgprot_t prot)
{
	unsigned long start = ALIGN_DOWN(addr, PAGE_SIZE);
	unsigned long sz = numpages * PAGE_SIZE;

	if (numpages <= 0)
		return 0;

	return apply_to_existing_page_range(&init_mm, start, sz, set_page_attr,
					    (void *)pgprot_val(prot));
}
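
/*
 * Illustrative sketch, not part of the original file: the kind of call PPC32
 * init code makes once the kernel is up, remapping a page-aligned range with
 * a fixed protection. example_protect_rodata() and its range are hypothetical;
 * only set_memory_attr() above and PAGE_KERNEL_RO are real.
 */
static inline int example_protect_rodata(unsigned long start, unsigned long end)
{
	int numpages = PFN_UP(end) - PFN_DOWN(start);

	/* PAGE_KERNEL_RO: read-only, non-executable kernel protection */
	return set_memory_attr(start, numpages, PAGE_KERNEL_RO);
}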