/linux/arch/mips/kernel/
mips-r2-to-r6-emul.c
    2243  (unsigned long)__this_cpu_read(mipsr2emustats.movs),  in mipsr2_emul_show()
    2246  (unsigned long)__this_cpu_read(mipsr2emustats.hilo),  in mipsr2_emul_show()
    2249  (unsigned long)__this_cpu_read(mipsr2emustats.muls),  in mipsr2_emul_show()
    2252  (unsigned long)__this_cpu_read(mipsr2emustats.divs),  in mipsr2_emul_show()
    2255  (unsigned long)__this_cpu_read(mipsr2emustats.dsps),  in mipsr2_emul_show()
    2258  (unsigned long)__this_cpu_read(mipsr2emustats.bops),  in mipsr2_emul_show()
    2261  (unsigned long)__this_cpu_read(mipsr2emustats.traps),  in mipsr2_emul_show()
    2264  (unsigned long)__this_cpu_read(mipsr2emustats.fpus),  in mipsr2_emul_show()
    2267  (unsigned long)__this_cpu_read(mipsr2emustats.loads),  in mipsr2_emul_show()
    2270  (unsigned long)__this_cpu_read(mipsr2emustats.stores),  in mipsr2_emul_show()
    [all …]
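The mipsr2 emulator entry above is the common per-CPU statistics pattern: fast paths bump a per-CPU counter without atomics, and a show routine reads the local copy back. A minimal sketch of that shape, assuming a hypothetical mipsr2_stats struct and counter names (the real structure has many more fields):

#include <linux/percpu.h>
#include <linux/seq_file.h>

/* Hypothetical per-CPU stats block standing in for the emulator's. */
struct mipsr2_stats {
        u64 movs;
        u64 hilo;
};

static DEFINE_PER_CPU(struct mipsr2_stats, mipsr2stats);

/* Fast path: runs in exception context with preemption off, so the
 * non-atomic __this_cpu_inc() is safe and cheap. */
static void account_mov_emulation(void)
{
        __this_cpu_inc(mipsr2stats.movs);
}

/* Show routine: dump this CPU's counters. */
static int mipsr2_stats_show(struct seq_file *s, void *unused)
{
        seq_printf(s, "movs:\t%lu\nhilo:\t%lu\n",
                   (unsigned long)__this_cpu_read(mipsr2stats.movs),
                   (unsigned long)__this_cpu_read(mipsr2stats.hilo));
        return 0;
}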
/linux/arch/sparc/kernel/
nmi.c
    102  if (__this_cpu_read(nmi_touch)) {  in perfctr_irq()
    106  if (!touched && __this_cpu_read(last_irq_sum) == sum) {  in perfctr_irq()
    108  if (__this_cpu_read(alert_counter) == 30 * nmi_hz)  in perfctr_irq()
    115  if (__this_cpu_read(wd_enabled)) {  in perfctr_irq()
    155  if (!__this_cpu_read(wd_enabled))  in stop_nmi_watchdog()
    212  if (__this_cpu_read(wd_enabled))  in start_nmi_watchdog()
    226  if (!__this_cpu_read(wd_enabled))  in nmi_adjust_hz_one()
/linux/kernel/
context_tracking.c
    70  if (__this_cpu_read(context_tracking.state) != state) {  in __context_tracking_enter()
    71  if (__this_cpu_read(context_tracking.active)) {  in __context_tracking_enter()
    151  if (__this_cpu_read(context_tracking.state) == state) {  in __context_tracking_exit()
    152  if (__this_cpu_read(context_tracking.active)) {  in __context_tracking_exit()
softirq.c
    77  struct task_struct *tsk = __this_cpu_read(ksoftirqd);  in wakeup_softirqd()
    91  struct task_struct *tsk = __this_cpu_read(ksoftirqd);  in ksoftirqd_running()
    155  return __this_cpu_read(softirq_ctrl.cnt) != 0;  in local_bh_blocked()
    229  curcnt = __this_cpu_read(softirq_ctrl.cnt);  in __local_bh_enable_ip()
    425  if (!force_irqthreads() || !__this_cpu_read(ksoftirqd)) {  in invoke_softirq()
    571  __this_cpu_read(ksoftirqd) == current)  in __do_softirq()
    937  *__this_cpu_read(tasklet_vec.tail) = per_cpu(tasklet_vec, cpu).head;  in takeover_tasklets()
    945  *__this_cpu_read(tasklet_hi_vec.tail) = per_cpu(tasklet_hi_vec, cpu).head;  in takeover_tasklets()
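softirq.c shows the per-CPU pointer flavor: each CPU owns its ksoftirqd task pointer, which is read locally and NULL-checked before use. A minimal sketch of that wakeup pattern, with a hypothetical my_worker pointer in place of the kernel's ksoftirqd:

#include <linux/percpu.h>
#include <linux/sched.h>

/* Hypothetical per-CPU worker, installed at CPU-hotplug time. */
static DEFINE_PER_CPU(struct task_struct *, my_worker);

/* Called with interrupts disabled, so "this CPU" cannot change under
 * us and the cheaper __this_cpu_read() needs no further protection. */
static void wakeup_my_worker(void)
{
        struct task_struct *tsk = __this_cpu_read(my_worker);

        /* The pointer can still be NULL early in boot or during hotplug. */
        if (tsk)
                wake_up_process(tsk);
}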
watchdog_hld.c
    80  delta = now - __this_cpu_read(last_timestamp);  in watchdog_check_timestamp()
    117  if (__this_cpu_read(watchdog_nmi_touch) == true) {  in watchdog_overflow_callback()
    135  if (__this_cpu_read(hard_watchdog_warn) == true)  in watchdog_overflow_callback()
watchdog.c
    320  unsigned long hrint = __this_cpu_read(hrtimer_interrupts);  in is_hardlockup()
    322  if (__this_cpu_read(hrtimer_interrupts_saved) == hrint)  in is_hardlockup()
    398  if (unlikely(__this_cpu_read(softlockup_touch_sync))) {  in watchdog_timer_fn()
    412  touch_ts = __this_cpu_read(watchdog_touch_ts);  in watchdog_timer_fn()
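is_hardlockup() in watchdog.c is the saved-snapshot pattern: the hrtimer interrupt increments a per-CPU counter, and the NMI path checks whether it moved since the previous NMI. A sketch of that logic, closely following the lines quoted above (illustrative, not the exact kernel code):

#include <linux/percpu.h>
#include <linux/types.h>

static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts);
static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts_saved);

/* Hrtimer callback: proof of life for this CPU. */
static void watchdog_interrupt_count(void)
{
        __this_cpu_inc(hrtimer_interrupts);
}

/* NMI path: if the counter did not move since the last check, timer
 * interrupts are stuck on this CPU -- a hard lockup. */
static bool is_hardlockup(void)
{
        unsigned long hrint = __this_cpu_read(hrtimer_interrupts);

        if (__this_cpu_read(hrtimer_interrupts_saved) == hrint)
                return true;

        __this_cpu_write(hrtimer_interrupts_saved, hrint);
        return false;
}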
/linux/arch/x86/kernel/cpu/mce/
intel.c
    132  if (__this_cpu_read(cmci_storm_state) == CMCI_STORM_NONE)  in mce_intel_cmci_poll()
    179  (__this_cpu_read(cmci_storm_state) == CMCI_STORM_ACTIVE)) {  in cmci_intel_adjust_timer()
    184  switch (__this_cpu_read(cmci_storm_state)) {  in cmci_intel_adjust_timer()
    218  unsigned int cnt = __this_cpu_read(cmci_storm_cnt);  in cmci_storm_detect()
    219  unsigned long ts = __this_cpu_read(cmci_time_stamp);  in cmci_storm_detect()
    223  if (__this_cpu_read(cmci_storm_state) != CMCI_STORM_NONE)  in cmci_storm_detect()
/linux/kernel/time/
tick-oneshot.c
    25  struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev);  in tick_program_event()
    52  struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev);  in tick_resume_oneshot()
    112  ret = __this_cpu_read(tick_cpu_device.mode) == TICKDEV_MODE_ONESHOT;  in tick_oneshot_mode_active()
/linux/include/asm-generic/
irq_regs.h
    21  return __this_cpu_read(__irq_regs);  in get_irq_regs()
    28  old_regs = __this_cpu_read(__irq_regs);  in set_irq_regs()
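irq_regs.h is the save-and-restore idiom in full: set_irq_regs() reads the old per-CPU pointer, installs the new one, and hands the old value back so nested interrupt entries can restore it on the way out. The real generic helpers are only a few lines; a sketch of the same shape:

#include <linux/percpu.h>
#include <asm/ptrace.h>

/* Per-CPU pointer to the pt_regs of the interrupt being handled. */
static DEFINE_PER_CPU(struct pt_regs *, __irq_regs);

static inline struct pt_regs *get_irq_regs(void)
{
        return __this_cpu_read(__irq_regs);
}

/* Returns the previous value so a nested entry can restore it:
 *
 *      old_regs = set_irq_regs(regs);
 *      ... handle the interrupt ...
 *      set_irq_regs(old_regs);
 */
static inline struct pt_regs *set_irq_regs(struct pt_regs *new_regs)
{
        struct pt_regs *old_regs = __this_cpu_read(__irq_regs);

        __this_cpu_write(__irq_regs, new_regs);
        return old_regs;
}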
/linux/lib/
percpu_test.c
    11  WARN(__this_cpu_read(pcp) != (expected), \
    13  __this_cpu_read(pcp), __this_cpu_read(pcp), \
cpumask.c
    252  prev = __this_cpu_read(distribute_cpu_mask_prev);  in cpumask_any_and_distribute()
    270  prev = __this_cpu_read(distribute_cpu_mask_prev);  in cpumask_any_distribute()
percpu_counter.c
    87  count = __this_cpu_read(*fbc->counters) + amount;  in percpu_counter_add_batch()
    113  count = __this_cpu_read(*fbc->counters);  in percpu_counter_sync()
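percpu_counter_add_batch() is the batched-counter pattern: deltas accumulate in a per-CPU slot and are folded into the shared s64 count, under the lock, only when they outgrow a batch threshold. A simplified sketch of that logic with a hypothetical function name (the real lib/percpu_counter.c routine is slightly more involved):

#include <linux/percpu_counter.h>
#include <linux/preempt.h>
#include <linux/spinlock.h>

static void my_counter_add_batch(struct percpu_counter *fbc,
                                 s64 amount, s32 batch)
{
        unsigned long flags;
        s64 count;

        preempt_disable();
        count = __this_cpu_read(*fbc->counters) + amount;
        if (count >= batch || count <= -batch) {
                /* Local delta got big: fold it into the global count. */
                raw_spin_lock_irqsave(&fbc->lock, flags);
                fbc->count += count;
                __this_cpu_write(*fbc->counters, 0);
                raw_spin_unlock_irqrestore(&fbc->lock, flags);
        } else {
                /* Common case: stay on the local cacheline. */
                this_cpu_add(*fbc->counters, amount);
        }
        preempt_enable();
}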
/linux/arch/x86/kernel/
hw_breakpoint.c
    485  set_debugreg(__this_cpu_read(cpu_debugreg[0]), 0);  in hw_breakpoint_restore()
    486  set_debugreg(__this_cpu_read(cpu_debugreg[1]), 1);  in hw_breakpoint_restore()
    487  set_debugreg(__this_cpu_read(cpu_debugreg[2]), 2);  in hw_breakpoint_restore()
    488  set_debugreg(__this_cpu_read(cpu_debugreg[3]), 3);  in hw_breakpoint_restore()
    490  set_debugreg(__this_cpu_read(cpu_dr7), 7);  in hw_breakpoint_restore()
irq.c
    248  desc = __this_cpu_read(vector_irq[vector]);  in DEFINE_IDTENTRY_IRQ()
    359  if (IS_ERR_OR_NULL(__this_cpu_read(vector_irq[vector])))  in fixup_irqs()
    364  desc = __this_cpu_read(vector_irq[vector]);  in fixup_irqs()
    375  if (__this_cpu_read(vector_irq[vector]) != VECTOR_RETRIGGERED)  in fixup_irqs()
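vector_irq shows that __this_cpu_read() works on per-CPU array elements too: each CPU carries its own vector-to-irq_desc table, indexed by the vector number taken from the interrupt entry. A minimal sketch of that lookup, with a hypothetical table in place of the real vector_irq:

#include <linux/percpu.h>
#include <linux/irq.h>

#define NR_MY_VECTORS 256

/* Hypothetical per-CPU vector table; one descriptor slot per vector. */
static DEFINE_PER_CPU(struct irq_desc *, my_vector_irq[NR_MY_VECTORS]);

/* Interrupt entry: translate the hardware vector into the descriptor
 * this CPU registered for it. Runs with IRQs off, so the plain
 * __this_cpu_read() needs no extra protection. */
static struct irq_desc *desc_for_vector(unsigned int vector)
{
        if (vector >= NR_MY_VECTORS)
                return NULL;
        return __this_cpu_read(my_vector_irq[vector]);
}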
irq_32.c
    80  irqstk = __this_cpu_read(hardirq_stack_ptr);  in execute_on_irq_stack()
    141  irqstk = __this_cpu_read(softirq_stack_ptr);  in do_softirq_own_stack()
kvm.c
    234  if (__this_cpu_read(apf_reason.enabled)) {  in kvm_read_and_reset_apf_flags()
    235  flags = __this_cpu_read(apf_reason.flags);  in kvm_read_and_reset_apf_flags()
    285  if (__this_cpu_read(apf_reason.enabled)) {  in DEFINE_IDTENTRY_SYSVEC()
    286  token = __this_cpu_read(apf_reason.token);  in DEFINE_IDTENTRY_SYSVEC()
    373  if (!__this_cpu_read(apf_reason.enabled))  in kvm_pv_disable_apf()
/linux/include/linux/
context_tracking_state.h
    41  return context_tracking_enabled() && __this_cpu_read(context_tracking.active);  in context_tracking_enabled_this_cpu()
    46  return __this_cpu_read(context_tracking.state) == CONTEXT_USER;  in context_tracking_in_user()
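The context-tracking helpers read single members of a per-CPU struct; __this_cpu_read() accepts the whole member-access expression, not just a variable name. A sketch of the same shape with a hypothetical tracking struct and state values:

#include <linux/percpu.h>
#include <linux/types.h>

enum my_ctx_state { MY_CONTEXT_KERNEL, MY_CONTEXT_USER };

/* Hypothetical per-CPU tracking state. */
struct my_context_tracking {
        bool active;
        enum my_ctx_state state;
};

static DEFINE_PER_CPU(struct my_context_tracking, my_ctx);

static inline bool my_ctx_active_this_cpu(void)
{
        return __this_cpu_read(my_ctx.active);
}

static inline bool my_ctx_in_user(void)
{
        return __this_cpu_read(my_ctx.state) == MY_CONTEXT_USER;
}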
/linux/drivers/cpuidle/
cpuidle-psci.c
    48  return __this_cpu_read(domain_state);  in psci_get_domain_state()
    113  struct device *pd_dev = __this_cpu_read(psci_cpuidle_data.dev);  in psci_idle_cpuhp_up()
    123  struct device *pd_dev = __this_cpu_read(psci_cpuidle_data.dev);  in psci_idle_cpuhp_down()
    152  u32 *state = __this_cpu_read(psci_cpuidle_data.psci_states);  in psci_enter_idle_state()
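cpuidle-psci.c keeps its per-CPU state file-local and exposes it only through tiny accessors, so callers never name the variable directly. A sketch of that encapsulation, with hypothetical accessor names:

#include <linux/percpu.h>
#include <linux/types.h>

/* File-local per-CPU state, reachable only via the accessors below. */
static DEFINE_PER_CPU(u32, domain_state);

static inline void my_set_domain_state(u32 state)
{
        __this_cpu_write(domain_state, state);
}

static inline u32 my_get_domain_state(void)
{
        return __this_cpu_read(domain_state);
}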
/linux/drivers/xen/events/
events_2l.c
    123  struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu);  in evtchn_2l_unmask()
    173  struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu);  in evtchn_2l_handle_events()
    192  start_word_idx = __this_cpu_read(current_word_idx);  in evtchn_2l_handle_events()
    193  start_bit_idx = __this_cpu_read(current_bit_idx);  in evtchn_2l_handle_events()
/linux/kernel/rcu/
tree_plugin.h
    285  if (__this_cpu_read(rcu_data.cpu_no_qs.s)) {  in rcu_qs()
    287  __this_cpu_read(rcu_data.gp_seq),  in rcu_qs()
    583  return (__this_cpu_read(rcu_data.exp_deferred_qs) ||  in rcu_preempt_need_deferred_qs()
    732  __this_cpu_read(rcu_data.core_needs_qs) &&  in rcu_flavor_sched_clock_irq()
    733  __this_cpu_read(rcu_data.cpu_no_qs.b.norm) &&  in rcu_flavor_sched_clock_irq()
    843  if (!__this_cpu_read(rcu_data.cpu_no_qs.s))  in rcu_qs()
    846  __this_cpu_read(rcu_data.gp_seq), TPS("cpuqs"));  in rcu_qs()
    848  if (!__this_cpu_read(rcu_data.cpu_no_qs.b.exp))  in rcu_qs()
    1140  return __this_cpu_read(rcu_data.rcu_cpu_kthread_task) == current;  in rcu_is_callbacks_kthread()
/linux/arch/x86/include/asm/
irq_stack.h
    117  call_on_stack(__this_cpu_read(hardirq_stack_ptr), \
    136  if (user_mode(regs) || __this_cpu_read(hardirq_stack_inuse)) { \
cpu_entry_area.h
    152  CEA_ESTACK_TOP(__this_cpu_read(cea_exception_stacks), name)
    155  CEA_ESTACK_BOT(__this_cpu_read(cea_exception_stacks), name)
/linux/mm/
vmstat.c
    360  x = delta + __this_cpu_read(*p);  in __mod_zone_page_state()
    362  t = __this_cpu_read(pcp->stat_threshold);  in __mod_zone_page_state()
    398  x = delta + __this_cpu_read(*p);  in __mod_node_page_state()
    400  t = __this_cpu_read(pcp->stat_threshold);  in __mod_node_page_state()
    447  t = __this_cpu_read(pcp->stat_threshold);  in __inc_zone_state()
    472  t = __this_cpu_read(pcp->stat_threshold);  in __inc_node_state()
    507  t = __this_cpu_read(pcp->stat_threshold);  in __dec_zone_state()
    532  t = __this_cpu_read(pcp->stat_threshold);  in __dec_node_state()
    858  if (!__this_cpu_read(pcp->expire) ||  in refresh_cpu_vm_stats()
    859  !__this_cpu_read(pcp->count))  in refresh_cpu_vm_stats()
    [all …]
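vmstat's __mod_*_page_state() helpers are the differential-counter pattern: add the delta to a small per-CPU diff, and spill into the global counter only when the diff crosses a per-CPU threshold, keeping the shared cacheline cold on the fast path. A simplified sketch under hypothetical names (the real code works on per-CPU zone/node stats and also handles overstep):

#include <linux/kernel.h>
#include <linux/atomic.h>
#include <linux/percpu.h>

/* Hypothetical per-CPU differential with its spill threshold. */
struct my_stat_diff {
        s8 diff;
        s8 stat_threshold;
};

static DEFINE_PER_CPU(struct my_stat_diff, my_diff);
static atomic_long_t my_global_count = ATOMIC_LONG_INIT(0);

/* Caller has preemption (or interrupts) disabled. */
static void my_mod_state(long delta)
{
        long x = delta + __this_cpu_read(my_diff.diff);
        long t = __this_cpu_read(my_diff.stat_threshold);

        if (abs(x) > t) {
                /* Diff outgrew the threshold: fold into the global count. */
                atomic_long_add(x, &my_global_count);
                x = 0;
        }
        __this_cpu_write(my_diff.diff, x);
}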
/linux/drivers/irqchip/
irq-xtensa-mx.c
    82  mask = __this_cpu_read(cached_irq_mask) & ~mask;  in xtensa_mx_irq_mask()
    100  mask |= __this_cpu_read(cached_irq_mask);  in xtensa_mx_irq_unmask()
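irq-xtensa-mx.c shadows the hardware interrupt mask in a per-CPU variable: mask/unmask read the cached copy, flip bits, then write both the cache and the hardware. A sketch of that read-modify-write, with a hypothetical register writer standing in for the real update of the xtensa INTENABLE register:

#include <linux/percpu.h>
#include <linux/types.h>

static DEFINE_PER_CPU(u32, cached_irq_mask);

/* Placeholder for the hardware interrupt-enable write. */
static void hw_write_intenable(u32 mask)
{
}

/* IRQs are off in the irq_chip callbacks, so the plain __this_cpu_*
 * accessors are sufficient. */
static void my_irq_mask(u32 bit)
{
        u32 mask = __this_cpu_read(cached_irq_mask) & ~bit;

        __this_cpu_write(cached_irq_mask, mask);
        hw_write_intenable(mask);
}

static void my_irq_unmask(u32 bit)
{
        u32 mask = __this_cpu_read(cached_irq_mask) | bit;

        __this_cpu_write(cached_irq_mask, mask);
        hw_write_intenable(mask);
}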
/linux/arch/arm64/include/asm/
arch_timer.h
    27  __wa = __this_cpu_read(timer_unstable_counter_workaround); \
    34  __wa = __this_cpu_read(timer_unstable_counter_workaround); \