Lines Matching refs:esr in arch/arm64/mm/fault.c (Linux arm64 fault handling)
46 int (*fn)(unsigned long far, unsigned int esr,
56 static inline const struct fault_info *esr_to_fault_info(unsigned int esr) in esr_to_fault_info() argument
58 return fault_info + (esr & ESR_ELx_FSC); in esr_to_fault_info()
61 static inline const struct fault_info *esr_to_debug_fault_info(unsigned int esr) in esr_to_debug_fault_info() argument
63 return debug_fault_info + DBG_ESR_EVT(esr); in esr_to_debug_fault_info()
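The two lookup helpers above index fixed tables with fields carved out of the syndrome register: the low fault-status-code (FSC) bits for memory aborts, and a debug event field for debug exceptions. A minimal stand-alone sketch of the FSC-indexed lookup is below; the mask and the table contents are written out by hand for illustration (all sketch_* names are invented here, not taken from asm/esr.h), so treat it as a sketch of the pattern rather than the kernel's code.

#include <stdio.h>

/* Assumed layout: ESR_ELx.ISS[5:0] holds the fault status code (FSC). */
#define SKETCH_FSC_MASK 0x3fUL

struct sketch_fault_info {
	const char *name;
};

/* 64 slots, one per possible FSC value; only a couple filled in here. */
static const struct sketch_fault_info sketch_fault_info[64] = {
	[0x04] = { "level 0 translation fault" },
	[0x0d] = { "level 1 permission fault"  },
};

static const struct sketch_fault_info *sketch_esr_to_fault_info(unsigned long esr)
{
	return &sketch_fault_info[esr & SKETCH_FSC_MASK];
}

int main(void)
{
	unsigned long esr = 0x9600000dUL;	/* example syndrome with FSC = 0x0d */
	const struct sketch_fault_info *inf = sketch_esr_to_fault_info(esr);

	printf("FSC 0x%02lx: %s\n", esr & SKETCH_FSC_MASK,
	       inf->name ? inf->name : "unknown");
	return 0;
}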
66 static void data_abort_decode(unsigned int esr) in data_abort_decode() argument
70 if (esr & ESR_ELx_ISV) { in data_abort_decode()
72 1U << ((esr & ESR_ELx_SAS) >> ESR_ELx_SAS_SHIFT)); in data_abort_decode()
74 (esr & ESR_ELx_SSE) >> ESR_ELx_SSE_SHIFT, in data_abort_decode()
75 (esr & ESR_ELx_SRT_MASK) >> ESR_ELx_SRT_SHIFT); in data_abort_decode()
77 (esr & ESR_ELx_SF) >> ESR_ELx_SF_SHIFT, in data_abort_decode()
78 (esr & ESR_ELx_AR) >> ESR_ELx_AR_SHIFT); in data_abort_decode()
80 pr_alert(" ISV = 0, ISS = 0x%08lx\n", esr & ESR_ELx_ISS_MASK); in data_abort_decode()
84 (esr & ESR_ELx_CM) >> ESR_ELx_CM_SHIFT, in data_abort_decode()
85 (esr & ESR_ELx_WNR) >> ESR_ELx_WNR_SHIFT); in data_abort_decode()
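data_abort_decode() above prints the instruction-syndrome fields of a data abort one by one. Below is a stand-alone sketch of the same field extraction; the bit positions follow the Arm ARM's data-abort ISS encoding as I understand it (ISV bit 24, SAS bits 23:22, SSE bit 21, SRT bits 20:16, SF bit 15, AR bit 14, CM bit 8, WnR bit 6), and the SK_* macro names are invented for the sketch rather than taken from asm/esr.h.

#include <stdio.h>

/* Assumed data-abort ISS layout (see lead-in); not the kernel's definitions. */
#define SK_ISV		(1UL << 24)		/* instruction syndrome valid */
#define SK_SAS(esr)	(((esr) >> 22) & 0x3)	/* access size: 2^SAS bytes */
#define SK_SSE(esr)	(((esr) >> 21) & 0x1)	/* sign extension */
#define SK_SRT(esr)	(((esr) >> 16) & 0x1f)	/* register transferred */
#define SK_SF(esr)	(((esr) >> 15) & 0x1)	/* 64-bit register */
#define SK_AR(esr)	(((esr) >> 14) & 0x1)	/* acquire/release */
#define SK_CM(esr)	(((esr) >> 8) & 0x1)	/* cache maintenance op */
#define SK_WNR(esr)	(((esr) >> 6) & 0x1)	/* write, not read */

static void sketch_data_abort_decode(unsigned long esr)
{
	if (esr & SK_ISV) {
		printf("  access size = %u bytes, SSE = %lu, SRT = %lu\n",
		       1U << SK_SAS(esr), SK_SSE(esr), SK_SRT(esr));
		printf("  SF = %lu, AR = %lu\n", SK_SF(esr), SK_AR(esr));
	} else {
		printf("  ISV = 0, ISS = 0x%08lx\n", esr & 0x1ffffffUL);
	}
	printf("  CM = %lu, WnR = %lu\n", SK_CM(esr), SK_WNR(esr));
}

int main(void)
{
	sketch_data_abort_decode(0x96000045UL);	/* example syndrome value */
	return 0;
}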
88 static void mem_abort_decode(unsigned int esr) in mem_abort_decode() argument
92 pr_alert(" ESR = 0x%08x\n", esr); in mem_abort_decode()
94 ESR_ELx_EC(esr), esr_get_class_string(esr), in mem_abort_decode()
95 (esr & ESR_ELx_IL) ? 32 : 16); in mem_abort_decode()
97 (esr & ESR_ELx_SET_MASK) >> ESR_ELx_SET_SHIFT, in mem_abort_decode()
98 (esr & ESR_ELx_FnV) >> ESR_ELx_FnV_SHIFT); in mem_abort_decode()
100 (esr & ESR_ELx_EA) >> ESR_ELx_EA_SHIFT, in mem_abort_decode()
101 (esr & ESR_ELx_S1PTW) >> ESR_ELx_S1PTW_SHIFT); in mem_abort_decode()
102 pr_alert(" FSC = 0x%02x: %s\n", (esr & ESR_ELx_FSC), in mem_abort_decode()
103 esr_to_fault_info(esr)->name); in mem_abort_decode()
105 if (esr_is_data_abort(esr)) in mem_abort_decode()
106 data_abort_decode(esr); in mem_abort_decode()
232 static bool is_el1_instruction_abort(unsigned int esr) in is_el1_instruction_abort() argument
234 return ESR_ELx_EC(esr) == ESR_ELx_EC_IABT_CUR; in is_el1_instruction_abort()
237 static bool is_el1_data_abort(unsigned int esr) in is_el1_data_abort() argument
239 return ESR_ELx_EC(esr) == ESR_ELx_EC_DABT_CUR; in is_el1_data_abort()
242 static inline bool is_el1_permission_fault(unsigned long addr, unsigned int esr, in is_el1_permission_fault() argument
245 unsigned int fsc_type = esr & ESR_ELx_FSC_TYPE; in is_el1_permission_fault()
247 if (!is_el1_data_abort(esr) && !is_el1_instruction_abort(esr)) in is_el1_permission_fault()
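The predicates above classify an abort by its exception class (EC, bits 31:26 of the ESR) and, for permission faults, by the fault-status type bits with the level stripped off. A small sketch of that classification follows; the EC values are written out as I believe the architecture defines them (0x20/0x21 instruction abort from a lower/the current EL, 0x24/0x25 data abort from a lower/the current EL), and all SK_* names are invented for the sketch rather than pulled from asm/esr.h.

#include <stdbool.h>
#include <stdio.h>

#define SK_EC(esr)		(((esr) >> 26) & 0x3f)
#define SK_EC_IABT_LOW		0x20	/* instruction abort, lower EL   */
#define SK_EC_IABT_CUR		0x21	/* instruction abort, current EL */
#define SK_EC_DABT_LOW		0x24	/* data abort, lower EL          */
#define SK_EC_DABT_CUR		0x25	/* data abort, current EL        */
#define SK_FSC_TYPE(esr)	((esr) & 0x3c)	/* fault class, level stripped */
#define SK_FSC_PERM		0x0c	/* permission fault class */

static bool sk_is_el1_instruction_abort(unsigned long esr)
{
	return SK_EC(esr) == SK_EC_IABT_CUR;
}

static bool sk_is_el1_data_abort(unsigned long esr)
{
	return SK_EC(esr) == SK_EC_DABT_CUR;
}

static bool sk_is_el1_permission_fault(unsigned long esr)
{
	if (!sk_is_el1_data_abort(esr) && !sk_is_el1_instruction_abort(esr))
		return false;
	return SK_FSC_TYPE(esr) == SK_FSC_PERM;
}

int main(void)
{
	unsigned long esr = 0x9600004dUL;	/* EC 0x25, FSC 0x0d: EL1 permission fault */

	printf("EL1 permission fault: %d\n", sk_is_el1_permission_fault(esr));
	return 0;
}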
261 unsigned int esr, in is_spurious_el1_translation_fault() argument
267 if (!is_el1_data_abort(esr) || in is_spurious_el1_translation_fault()
268 (esr & ESR_ELx_FSC_TYPE) != ESR_ELx_FSC_FAULT) in is_spurious_el1_translation_fault()
293 unsigned int esr, struct pt_regs *regs) in die_kernel_fault() argument
300 mem_abort_decode(esr); in die_kernel_fault()
303 die("Oops", regs, esr); in die_kernel_fault()
309 static void report_tag_fault(unsigned long addr, unsigned int esr, in report_tag_fault() argument
316 bool is_write = !!(esr & ESR_ELx_WNR); in report_tag_fault()
321 static inline void report_tag_fault(unsigned long addr, unsigned int esr, in report_tag_fault() argument
325 static void do_tag_recovery(unsigned long addr, unsigned int esr, in do_tag_recovery() argument
329 report_tag_fault(addr, esr, regs); in do_tag_recovery()
340 static bool is_el1_mte_sync_tag_check_fault(unsigned int esr) in is_el1_mte_sync_tag_check_fault() argument
342 unsigned int fsc = esr & ESR_ELx_FSC; in is_el1_mte_sync_tag_check_fault()
344 if (!is_el1_data_abort(esr)) in is_el1_mte_sync_tag_check_fault()
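The MTE helper above checks whether an EL1 data abort's FSC identifies a synchronous tag check fault. A short sketch of that check is below; the 0x11 status code is my assumption for the synchronous tag-check-fault encoding (not quoted from asm/esr.h), and the SK_* names are invented for the sketch.

#include <stdbool.h>
#include <stdio.h>

#define SK_EC(esr)	(((esr) >> 26) & 0x3f)
#define SK_EC_DABT_CUR	0x25	/* data abort taken at the current EL */
#define SK_FSC(esr)	((esr) & 0x3f)
#define SK_FSC_MTE	0x11	/* assumed synchronous tag check fault code */

static bool sk_is_el1_mte_sync_tag_check_fault(unsigned long esr)
{
	return SK_EC(esr) == SK_EC_DABT_CUR && SK_FSC(esr) == SK_FSC_MTE;
}

int main(void)
{
	printf("%d\n", sk_is_el1_mte_sync_tag_check_fault(0x96000011UL));
	return 0;
}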
353 static void __do_kernel_fault(unsigned long addr, unsigned int esr, in __do_kernel_fault() argument
362 if (!is_el1_instruction_abort(esr) && fixup_exception(regs)) in __do_kernel_fault()
365 if (WARN_RATELIMIT(is_spurious_el1_translation_fault(addr, esr, regs), in __do_kernel_fault()
369 if (is_el1_mte_sync_tag_check_fault(esr)) { in __do_kernel_fault()
370 do_tag_recovery(addr, esr, regs); in __do_kernel_fault()
375 if (is_el1_permission_fault(addr, esr, regs)) { in __do_kernel_fault()
376 if (esr & ESR_ELx_WNR) in __do_kernel_fault()
378 else if (is_el1_instruction_abort(esr)) in __do_kernel_fault()
385 if (kfence_handle_page_fault(addr, esr & ESR_ELx_WNR, regs)) in __do_kernel_fault()
391 die_kernel_fault(msg, addr, esr, regs); in __do_kernel_fault()
394 static void set_thread_esr(unsigned long address, unsigned int esr) in set_thread_esr() argument
411 switch (ESR_ELx_EC(esr)) { in set_thread_esr()
422 esr &= ESR_ELx_EC_MASK | ESR_ELx_IL | in set_thread_esr()
424 esr |= ESR_ELx_FSC_FAULT; in set_thread_esr()
432 esr &= ESR_ELx_EC_MASK | ESR_ELx_IL; in set_thread_esr()
433 esr |= ESR_ELx_FSC_FAULT; in set_thread_esr()
442 WARN(1, "ESR 0x%x is not DABT or IABT from EL0\n", esr); in set_thread_esr()
443 esr = 0; in set_thread_esr()
448 current->thread.fault_code = esr; in set_thread_esr()
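set_thread_esr() above sanitizes the syndrome before it is stored as the fault code reported to userspace: only the exception class and the instruction-length bit (plus a few extra ISS bits in one branch) are kept, and the FSC is replaced with a generic translation-fault code. A rough sketch of that masking follows; the bit positions and the 0x04 status code are assumed here rather than taken from asm/esr.h, and the SK_* names are invented.

#include <stdio.h>

#define SK_EC_MASK	(0x3fUL << 26)	/* exception class, bits 31:26 */
#define SK_IL		(1UL << 25)	/* instruction length bit */
#define SK_FSC_FAULT	0x04UL		/* assumed generic translation-fault code */

/* Keep only what userspace is allowed to see about a fault syndrome. */
static unsigned long sk_sanitize_esr(unsigned long esr)
{
	return (esr & (SK_EC_MASK | SK_IL)) | SK_FSC_FAULT;
}

int main(void)
{
	printf("0x%08lx\n", sk_sanitize_esr(0x9600004dUL));
	return 0;
}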
451 static void do_bad_area(unsigned long far, unsigned int esr, in do_bad_area() argument
461 const struct fault_info *inf = esr_to_fault_info(esr); in do_bad_area()
463 set_thread_esr(addr, esr); in do_bad_area()
466 __do_kernel_fault(addr, esr, regs); in do_bad_area()
502 static bool is_el0_instruction_abort(unsigned int esr) in is_el0_instruction_abort() argument
504 return ESR_ELx_EC(esr) == ESR_ELx_EC_IABT_LOW; in is_el0_instruction_abort()
511 static bool is_write_abort(unsigned int esr) in is_write_abort() argument
513 return (esr & ESR_ELx_WNR) && !(esr & ESR_ELx_CM); in is_write_abort()
516 static int __kprobes do_page_fault(unsigned long far, unsigned int esr, in do_page_fault() argument
526 if (kprobe_page_fault(regs, esr)) in do_page_fault()
545 if (is_el0_instruction_abort(esr)) { in do_page_fault()
549 } else if (is_write_abort(esr)) { in do_page_fault()
563 if (is_ttbr0_addr(addr) && is_el1_permission_fault(addr, esr, regs)) { in do_page_fault()
564 if (is_el1_instruction_abort(esr)) in do_page_fault()
566 addr, esr, regs); in do_page_fault()
570 addr, esr, regs); in do_page_fault()
640 inf = esr_to_fault_info(esr); in do_page_fault()
641 set_thread_esr(addr, esr); in do_page_fault()
669 __do_kernel_fault(addr, esr, regs); in do_page_fault()
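In do_page_fault() above, the kind of abort decides how the faulting access is treated: an EL0 instruction abort is an execute access, a write abort (WnR set and not a cache-maintenance operation) is a write, and anything else is a read. A small sketch of that classification is below; the enum, the SK_* constants and the example value are all invented for the sketch.

#include <stdio.h>

enum sk_access { SK_ACCESS_READ, SK_ACCESS_WRITE, SK_ACCESS_EXEC };

#define SK_EC(esr)	(((esr) >> 26) & 0x3f)
#define SK_EC_IABT_LOW	0x20		/* instruction abort from a lower EL */
#define SK_WNR		(1UL << 6)	/* write, not read */
#define SK_CM		(1UL << 8)	/* cache maintenance operation */

static enum sk_access sk_classify_access(unsigned long esr)
{
	if (SK_EC(esr) == SK_EC_IABT_LOW)
		return SK_ACCESS_EXEC;
	if ((esr & SK_WNR) && !(esr & SK_CM))
		return SK_ACCESS_WRITE;
	return SK_ACCESS_READ;
}

int main(void)
{
	printf("%d\n", sk_classify_access(0x92000045UL));	/* expect SK_ACCESS_WRITE */
	return 0;
}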
674 unsigned int esr, in do_translation_fault() argument
680 return do_page_fault(far, esr, regs); in do_translation_fault()
682 do_bad_area(far, esr, regs); in do_translation_fault()
686 static int do_alignment_fault(unsigned long far, unsigned int esr, in do_alignment_fault() argument
689 do_bad_area(far, esr, regs); in do_alignment_fault()
693 static int do_bad(unsigned long far, unsigned int esr, struct pt_regs *regs) in do_bad() argument
698 static int do_sea(unsigned long far, unsigned int esr, struct pt_regs *regs) in do_sea() argument
703 inf = esr_to_fault_info(esr); in do_sea()
713 if (esr & ESR_ELx_FnV) { in do_sea()
723 arm64_notify_die(inf->name, regs, inf->sig, inf->code, siaddr, esr); in do_sea()
728 static int do_tag_check_fault(unsigned long far, unsigned int esr, in do_tag_check_fault() argument
737 do_bad_area(far, esr, regs); in do_tag_check_fault()
808 void do_mem_abort(unsigned long far, unsigned int esr, struct pt_regs *regs) in do_mem_abort() argument
810 const struct fault_info *inf = esr_to_fault_info(esr); in do_mem_abort()
813 if (!inf->fn(far, esr, regs)) in do_mem_abort()
818 mem_abort_decode(esr); in do_mem_abort()
827 arm64_notify_die(inf->name, regs, inf->sig, inf->code, addr, esr); in do_mem_abort()
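do_mem_abort() above is a table-driven dispatcher: the FSC selects a fault_info entry whose fn handler is called, and only when the handler reports failure is the abort decoded and escalated. A stripped-down sketch of the same pattern is below; the handler table, the pt_regs stand-in, the handlers and the example values are all placeholders invented for the sketch.

#include <stdio.h>

struct sk_regs { unsigned long pc; };	/* stand-in for struct pt_regs */

struct sk_fault_info {
	int (*fn)(unsigned long far, unsigned long esr, struct sk_regs *regs);
	const char *name;
};

static int sk_do_page_fault(unsigned long far, unsigned long esr,
			    struct sk_regs *regs)
{
	return 0;	/* 0 == handled */
}

static int sk_do_bad(unsigned long far, unsigned long esr, struct sk_regs *regs)
{
	return 1;	/* non-zero == not handled, caller reports it */
}

#define SK_FSC(esr) ((esr) & 0x3f)

static const struct sk_fault_info sk_fault_info[64] = {
	[0x04] = { sk_do_page_fault, "level 0 translation fault" },
	[0x21] = { sk_do_bad,        "alignment fault"           },
};

static void sk_do_mem_abort(unsigned long far, unsigned long esr,
			    struct sk_regs *regs)
{
	const struct sk_fault_info *inf = &sk_fault_info[SK_FSC(esr)];

	if (!inf->fn || !inf->fn(far, esr, regs))
		return;
	printf("unhandled %s at 0x%lx\n", inf->name, far);
}

int main(void)
{
	struct sk_regs regs = { 0 };

	sk_do_mem_abort(0x1000UL, 0x96000021UL, &regs);	/* fake address and syndrome */
	return 0;
}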
831 void do_sp_pc_abort(unsigned long addr, unsigned int esr, struct pt_regs *regs) in do_sp_pc_abort() argument
834 addr, esr); in do_sp_pc_abort()
838 int __init early_brk64(unsigned long addr, unsigned int esr,
891 void do_debug_exception(unsigned long addr_if_watchpoint, unsigned int esr, in do_debug_exception() argument
894 const struct fault_info *inf = esr_to_debug_fault_info(esr); in do_debug_exception()
902 if (inf->fn(addr_if_watchpoint, esr, regs)) { in do_debug_exception()
903 arm64_notify_die(inf->name, regs, inf->sig, inf->code, pc, esr); in do_debug_exception()
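do_debug_exception() above dispatches the same way, but through esr_to_debug_fault_info(): a small debug event number taken from the top of the syndrome selects the handler. The sketch below assumes that event number sits in ESR bits 29:27 (my reading of DBG_ESR_EVT); the handler names in the table are placeholders, not the kernel's debug_fault_info strings.

#include <stdio.h>

/* Assumed: the debug event number lives in ESR bits 29:27 (cf. DBG_ESR_EVT). */
#define SK_DBG_EVT(esr)	(((esr) >> 27) & 0x7)

static const char *const sk_debug_fault_name[8] = {
	[0] = "hw-breakpoint handler",
	[1] = "single-step handler",
	[2] = "hw-watchpoint handler",
	[6] = "brk handler",
};

int main(void)
{
	unsigned long esr = 0x31UL << 26;	/* EC 0x31: breakpoint taken at the current EL */
	const char *name = sk_debug_fault_name[SK_DBG_EVT(esr)];

	printf("%s\n", name ? name : "unknown debug event");
	return 0;
}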