/linux/arch/x86/xen/
smp.c
     37  per_cpu(xen_resched_irq, cpu).irq = -1;  in xen_smp_intr_free()
     38  kfree(per_cpu(xen_resched_irq, cpu).name);  in xen_smp_intr_free()
     39  per_cpu(xen_resched_irq, cpu).name = NULL;  in xen_smp_intr_free()
     43  per_cpu(xen_callfunc_irq, cpu).irq = -1;  in xen_smp_intr_free()
     47  if (per_cpu(xen_debug_irq, cpu).irq >= 0) {  in xen_smp_intr_free()
     49  per_cpu(xen_debug_irq, cpu).irq = -1;  in xen_smp_intr_free()
     50  kfree(per_cpu(xen_debug_irq, cpu).name);  in xen_smp_intr_free()
     51  per_cpu(xen_debug_irq, cpu).name = NULL;  in xen_smp_intr_free()
     76  per_cpu(xen_resched_irq, cpu).irq = rc;  in xen_smp_intr_init()
     88  per_cpu(xen_callfunc_irq, cpu).irq = rc;  in xen_smp_intr_init()
    [all …]
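Taken together, the smp.c hits show the usual lifecycle for per-CPU IRQ bookkeeping: each interrupt keeps its IRQ number and name in a per-CPU struct, with -1 meaning "not bound", and the free path is guarded so it is safe to call on a CPU that was never set up. A minimal sketch of that teardown under those assumptions (example_intr_free() is an illustrative name, not the kernel's):

#include <linux/percpu.h>
#include <linux/slab.h>
#include <xen/events.h>

struct xen_common_irq {
        int irq;        /* -1 means "not bound" */
        char *name;     /* heap-allocated name owned by this slot */
};

static DEFINE_PER_CPU(struct xen_common_irq, xen_resched_irq) = { .irq = -1 };

/* illustrative teardown mirroring the xen_smp_intr_free() hits above */
static void example_intr_free(unsigned int cpu)
{
        if (per_cpu(xen_resched_irq, cpu).irq >= 0) {
                unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu).irq, NULL);
                per_cpu(xen_resched_irq, cpu).irq = -1;
                kfree(per_cpu(xen_resched_irq, cpu).name);
                per_cpu(xen_resched_irq, cpu).name = NULL;
        }
}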
spinlock.c
     25  int irq = per_cpu(lock_kicker_irq, cpu);  in xen_qlock_kick()
     74  WARN(per_cpu(lock_kicker_irq, cpu) >= 0, "spinlock on CPU%d exists on IRQ%d!\n",  in xen_init_lock_cpu()
     75  cpu, per_cpu(lock_kicker_irq, cpu));  in xen_init_lock_cpu()
     87  per_cpu(lock_kicker_irq, cpu) = irq;  in xen_init_lock_cpu()
     88  per_cpu(irq_name, cpu) = name;  in xen_init_lock_cpu()
    105  irq = per_cpu(lock_kicker_irq, cpu);  in xen_uninit_lock_cpu()
    110  per_cpu(lock_kicker_irq, cpu) = -1;  in xen_uninit_lock_cpu()
    111  kfree(per_cpu(irq_name, cpu));  in xen_uninit_lock_cpu()
    112  per_cpu(irq_name, cpu) = NULL;  in xen_uninit_lock_cpu()
smp_pv.c
    100  if (per_cpu(xen_irq_work, cpu).irq >= 0) {  in xen_smp_intr_free_pv()
    102  per_cpu(xen_irq_work, cpu).irq = -1;  in xen_smp_intr_free_pv()
    103  kfree(per_cpu(xen_irq_work, cpu).name);  in xen_smp_intr_free_pv()
    104  per_cpu(xen_irq_work, cpu).name = NULL;  in xen_smp_intr_free_pv()
    107  if (per_cpu(xen_pmu_irq, cpu).irq >= 0) {  in xen_smp_intr_free_pv()
    109  per_cpu(xen_pmu_irq, cpu).irq = -1;  in xen_smp_intr_free_pv()
    110  kfree(per_cpu(xen_pmu_irq, cpu).name);  in xen_smp_intr_free_pv()
    111  per_cpu(xen_pmu_irq, cpu).name = NULL;  in xen_smp_intr_free_pv()
    129  per_cpu(xen_irq_work, cpu).irq = rc;  in xen_smp_intr_init_pv()
    140  per_cpu(xen_pmu_irq, cpu).irq = rc;  in xen_smp_intr_init_pv()
    [all …]
/linux/kernel/
smpboot.c
     32  struct task_struct *tsk = per_cpu(idle_threads, cpu);  in idle_thread_get()
     41  per_cpu(idle_threads, smp_processor_id()) = current;  in idle_thread_set_boot_cpu()
     52  struct task_struct *tsk = per_cpu(idle_threads, cpu);  in idle_init()
     59  per_cpu(idle_threads, cpu) = tsk;  in idle_init()
    337  return atomic_read(&per_cpu(cpu_hotplug_state, cpu));  in cpu_report_state()
    359  switch (atomic_read(&per_cpu(cpu_hotplug_state, cpu))) {  in cpu_check_up_prepare()
    441  oldstate = atomic_read(&per_cpu(cpu_hotplug_state, cpu));  in cpu_wait_death()
    445  atomic_set(&per_cpu(cpu_hotplug_state, cpu), CPU_POST_DEAD);  in cpu_wait_death()
    448  if (atomic_cmpxchg(&per_cpu(cpu_hotplug_state, cpu),  in cpu_wait_death()
    472  oldstate = atomic_read(&per_cpu(cpu_hotplug_state, cpu));  in cpu_report_death()
    [all …]
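Two shapes recur in the smpboot.c hits: a per-CPU pointer slot (idle_threads) read and written directly, and a per-CPU atomic whose address is handed to the atomic API (cpu_hotplug_state). A sketch of both, assuming an illustrative initial state value:

#include <linux/percpu.h>
#include <linux/atomic.h>
#include <linux/sched.h>

static DEFINE_PER_CPU(struct task_struct *, idle_threads);
static DEFINE_PER_CPU(atomic_t, cpu_hotplug_state) = ATOMIC_INIT(0);

static struct task_struct *example_idle_thread_get(unsigned int cpu)
{
        return per_cpu(idle_threads, cpu);
}

static int example_report_state(unsigned int cpu)
{
        /* per_cpu() yields an lvalue, so taking its address is fine */
        return atomic_read(&per_cpu(cpu_hotplug_state, cpu));
}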
softirq.c
    898  per_cpu(tasklet_vec, cpu).tail =  in softirq_init()
    899  &per_cpu(tasklet_vec, cpu).head;  in softirq_init()
    900  per_cpu(tasklet_hi_vec, cpu).tail =  in softirq_init()
    901  &per_cpu(tasklet_hi_vec, cpu).head;  in softirq_init()
    936  if (&per_cpu(tasklet_vec, cpu).head != per_cpu(tasklet_vec, cpu).tail) {  in takeover_tasklets()
    938  __this_cpu_write(tasklet_vec.tail, per_cpu(tasklet_vec, cpu).tail);  in takeover_tasklets()
    939  per_cpu(tasklet_vec, cpu).head = NULL;  in takeover_tasklets()
    940  per_cpu(tasklet_vec, cpu).tail = &per_cpu(tasklet_vec, cpu).head;  in takeover_tasklets()
    944  if (&per_cpu(tasklet_hi_vec, cpu).head != per_cpu(tasklet_hi_vec, cpu).tail) {  in takeover_tasklets()
    947  per_cpu(tasklet_hi_vec, cpu).head = NULL;  in takeover_tasklets()
    [all …]
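The tasklet hits rest on one invariant: each per-CPU queue is a singly linked list whose tail field points at the last next-pointer, so an empty queue is exactly the one whose tail points back at its own head. A sketch of the structure and the softirq_init() loop above (struct layout mirrors kernel/softirq.c; the helper name is illustrative):

#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <linux/interrupt.h>

struct tasklet_head {
        struct tasklet_struct *head;
        struct tasklet_struct **tail;   /* last link, enables O(1) append */
};

static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec);

static void example_softirq_init(void)
{
        int cpu;

        for_each_possible_cpu(cpu)
                /* empty queue: tail points back at head */
                per_cpu(tasklet_vec, cpu).tail =
                        &per_cpu(tasklet_vec, cpu).head;
}

takeover_tasklets() in the listing uses the same invariant in reverse: a dead CPU's queue is non-empty exactly when its tail no longer points at its head.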
profile.c
    245  per_cpu(cpu_profile_flip, cpu) = !per_cpu(cpu_profile_flip, cpu);  in __profile_flip_buffers()
    253  j = per_cpu(cpu_profile_flip, get_cpu());  in profile_flip_buffers()
    257  struct profile_hit *hits = per_cpu(cpu_profile_hits, cpu)[j];  in profile_flip_buffers()
    276  i = per_cpu(cpu_profile_flip, get_cpu());  in profile_discard_flip_buffers()
    296  hits = per_cpu(cpu_profile_hits, cpu)[per_cpu(cpu_profile_flip, cpu)];  in do_profile_hits()
    344  if (per_cpu(cpu_profile_hits, cpu)[i]) {  in profile_dead_cpu()
    345  page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[i]);  in profile_dead_cpu()
    346  per_cpu(cpu_profile_hits, cpu)[i] = NULL;  in profile_dead_cpu()
    358  per_cpu(cpu_profile_flip, cpu) = 0;  in profile_prepare_cpu()
    361  if (per_cpu(cpu_profile_hits, cpu)[i])  in profile_prepare_cpu()
    [all …]
/linux/drivers/cpufreq/
speedstep-centrino.c
    261  per_cpu(centrino_model, policy->cpu) = model;  in centrino_cpu_init_table()
    296  if ((per_cpu(centrino_cpu, cpu) == &cpu_ids[CPU_BANIAS]) ||  in extract_clock()
    303  if ((!per_cpu(centrino_model, cpu)) ||  in extract_clock()
    304  (!per_cpu(centrino_model, cpu)->op_points)  in extract_clock()
    309  per_cpu(centrino_model, cpu)->op_points[i].frequency  in extract_clock()
    313  return per_cpu(centrino_model, cpu)->  in extract_clock()
    367  per_cpu(centrino_cpu, policy->cpu) = &cpu_ids[i];  in centrino_cpu_init()
    369  if (!per_cpu(centrino_cpu, policy->cpu)) {  in centrino_cpu_init()
    407  if (!per_cpu(centrino_model, cpu))  in centrino_cpu_exit()
    410  per_cpu(centrino_model, cpu) = NULL;  in centrino_cpu_exit()
    [all …]
vexpress-spc-cpufreq.c
     78  cpu_freq = per_cpu(cpu_last_req_freq, j);  in find_cluster_maxfreq()
     80  if (cluster == per_cpu(physical_cluster, j) &&  in find_cluster_maxfreq()
     90  u32 cur_cluster = per_cpu(physical_cluster, cpu);  in clk_get_cpu_rate()
    103  return per_cpu(cpu_last_req_freq, cpu);  in ve_spc_cpufreq_get_rate()
    118  prev_rate = per_cpu(cpu_last_req_freq, cpu);  in ve_spc_cpufreq_set_rate()
    119  per_cpu(cpu_last_req_freq, cpu) = rate;  in ve_spc_cpufreq_set_rate()
    120  per_cpu(physical_cluster, cpu) = new_cluster;  in ve_spc_cpufreq_set_rate()
    144  per_cpu(cpu_last_req_freq, cpu) = prev_rate;  in ve_spc_cpufreq_set_rate()
    145  per_cpu(physical_cluster, cpu) = old_cluster;  in ve_spc_cpufreq_set_rate()
    430  per_cpu(physical_cluster, cpu) = cur_cluster;  in ve_spc_cpufreq_init()
    [all …]
/linux/drivers/perf/
arm_pmu_acpi.c
    161  per_cpu(pmu_irqs, cpu) = irq;  in arm_pmu_acpi_parse_irqs()
    169  irq = per_cpu(pmu_irqs, cpu);  in arm_pmu_acpi_parse_irqs()
    180  if (per_cpu(pmu_irqs, irq_cpu) == irq)  in arm_pmu_acpi_parse_irqs()
    181  per_cpu(pmu_irqs, irq_cpu) = 0;  in arm_pmu_acpi_parse_irqs()
    195  pmu = per_cpu(probed_pmus, cpu);  in arm_pmu_acpi_find_alloc_pmu()
    227  int other_irq = per_cpu(hw_events->irq, cpu);  in pmu_irq_matches()
    259  if (per_cpu(probed_pmus, cpu))  in arm_pmu_acpi_cpu_starting()
    262  irq = per_cpu(pmu_irqs, cpu);  in arm_pmu_acpi_cpu_starting()
    268  per_cpu(probed_pmus, cpu) = pmu;  in arm_pmu_acpi_cpu_starting()
    272  per_cpu(hw_events->irq, cpu) = irq;  in arm_pmu_acpi_cpu_starting()
    [all …]
/linux/arch/powerpc/kernel/
irq.c
    610  seq_printf(p, "%10u ", per_cpu(irq_stat, j).pmu_irqs);  in arch_show_interrupts()
    629  seq_printf(p, "%10u ", per_cpu(irq_stat, j).sreset_irqs);  in arch_show_interrupts()
    656  u64 sum = per_cpu(irq_stat, cpu).timer_irqs_event;  in arch_irq_stat_cpu()
    658  sum += per_cpu(irq_stat, cpu).broadcast_irqs_event;  in arch_irq_stat_cpu()
    659  sum += per_cpu(irq_stat, cpu).pmu_irqs;  in arch_irq_stat_cpu()
    660  sum += per_cpu(irq_stat, cpu).mce_exceptions;  in arch_irq_stat_cpu()
    661  sum += per_cpu(irq_stat, cpu).spurious_irqs;  in arch_irq_stat_cpu()
    662  sum += per_cpu(irq_stat, cpu).timer_irqs_others;  in arch_irq_stat_cpu()
    666  sum += per_cpu(irq_stat, cpu).sreset_irqs;  in arch_irq_stat_cpu()
    668  sum += per_cpu(irq_stat, cpu).soft_nmi_irqs;  in arch_irq_stat_cpu()
    [all …]
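arch_irq_stat_cpu() folds one CPU's per-CPU counters into a scalar; the /proc/interrupts path then walks every CPU with the same accessor. A sketch with an illustrative, trimmed-down stat struct (the real powerpc irq_stat has many more fields):

#include <linux/percpu.h>
#include <linux/types.h>

/* illustrative subset of powerpc's per-CPU irq_stat */
struct example_irq_stat {
        unsigned int timer_irqs_event;
        unsigned int pmu_irqs;
        unsigned int mce_exceptions;
};

static DEFINE_PER_CPU(struct example_irq_stat, irq_stat);

static u64 example_arch_irq_stat_cpu(unsigned int cpu)
{
        u64 sum = per_cpu(irq_stat, cpu).timer_irqs_event;

        sum += per_cpu(irq_stat, cpu).pmu_irqs;
        sum += per_cpu(irq_stat, cpu).mce_exceptions;
        return sum;
}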
watchdog.c
    122  cpu, tb, per_cpu(wd_timer_tb, cpu),  in wd_lockup_ipi()
    123  tb_to_ns(tb - per_cpu(wd_timer_tb, cpu)) / 1000000);  in wd_lockup_ipi()
    246  per_cpu(wd_timer_tb, cpu) = tb;  in watchdog_timer_interrupt()
    269  if (tb - per_cpu(wd_timer_tb, cpu) >= wd_panic_timeout_tb) {  in DEFINE_INTERRUPT_HANDLER_NMI()
    280  cpu, tb, per_cpu(wd_timer_tb, cpu),  in DEFINE_INTERRUPT_HANDLER_NMI()
    281  tb_to_ns(tb - per_cpu(wd_timer_tb, cpu)) / 1000000);  in DEFINE_INTERRUPT_HANDLER_NMI()
    323  if (tb - per_cpu(wd_timer_tb, cpu) >= ticks) {  in arch_touch_nmi_watchdog()
    324  per_cpu(wd_timer_tb, cpu) = tb;  in arch_touch_nmi_watchdog()
/linux/arch/arm/mm/
context.c
     67  asid = per_cpu(active_asids, cpu).counter;  in a15_erratum_get_cpumask()
     69  asid = per_cpu(reserved_asids, cpu);  in a15_erratum_get_cpumask()
    144  asid = atomic64_xchg(&per_cpu(active_asids, i), 0);  in flush_context()
    153  asid = per_cpu(reserved_asids, i);  in flush_context()
    155  per_cpu(reserved_asids, i) = asid;  in flush_context()
    180  if (per_cpu(reserved_asids, cpu) == asid) {  in check_update_reserved_asid()
    182  per_cpu(reserved_asids, cpu) = newasid;  in check_update_reserved_asid()
    255  && atomic64_xchg(&per_cpu(active_asids, cpu), asid))  in check_and_switch_context()
    271  atomic64_set(&per_cpu(active_asids, cpu), asid);  in check_and_switch_context()
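The ARM ASID allocator pairs a per-CPU atomic64 holding the currently active ASID with a plain per-CPU reserved copy; on generation rollover every active value is atomically swapped out and parked as reserved. A simplified sketch of that flush (the real flush_context() also falls back to the old reserved value and rebuilds a bitmap of live ASIDs):

#include <linux/percpu.h>
#include <linux/atomic.h>
#include <linux/cpumask.h>
#include <linux/types.h>

static DEFINE_PER_CPU(atomic64_t, active_asids);
static DEFINE_PER_CPU(u64, reserved_asids);

static void example_flush_context(void)
{
        unsigned int i;
        u64 asid;

        for_each_possible_cpu(i) {
                /* 0 marks "no active ASID" for CPUs mid-rollover */
                asid = atomic64_xchg(&per_cpu(active_asids, i), 0);
                if (asid)
                        per_cpu(reserved_asids, i) = asid;
        }
}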
proc-v7-bugs.c
     44  if (per_cpu(harden_branch_predictor_fn, cpu))  in cpu_v7_spectre_init()
     54  per_cpu(harden_branch_predictor_fn, cpu) =  in cpu_v7_spectre_init()
     61  per_cpu(harden_branch_predictor_fn, cpu) =  in cpu_v7_spectre_init()
     87  per_cpu(harden_branch_predictor_fn, cpu) =  in cpu_v7_spectre_init()
     94  per_cpu(harden_branch_predictor_fn, cpu) =  in cpu_v7_spectre_init()
/linux/arch/ia64/mm/
tlb.c
    101  per_cpu(ia64_need_tlb_flush, i) = 1;  in wrap_mmu_context()
    397  per_cpu(ia64_tr_num, cpu) = 8;  in ia64_tlb_init()
    401  if (per_cpu(ia64_tr_num, cpu) >  in ia64_tlb_init()
    403  per_cpu(ia64_tr_num, cpu) =  in ia64_tlb_init()
    405  if (per_cpu(ia64_tr_num, cpu) > IA64_TR_ALLOC_MAX) {  in ia64_tlb_init()
    407  per_cpu(ia64_tr_num, cpu) = IA64_TR_ALLOC_MAX;  in ia64_tlb_init()
    472  for (i = IA64_TR_ALLOC_BASE; i <= per_cpu(ia64_tr_used, cpu);  in ia64_itr_entry()
    516  if (i >= per_cpu(ia64_tr_num, cpu))  in ia64_itr_entry()
    520  if (i > per_cpu(ia64_tr_used, cpu))  in ia64_itr_entry()
    521  per_cpu(ia64_tr_used, cpu) = i;  in ia64_itr_entry()
    [all …]
/linux/arch/parisc/kernel/
irq.c
     78  per_cpu(local_ack_eiem, cpu) &= ~mask;  in cpu_ack_irq()
     81  set_eiem(cpu_eiem & per_cpu(local_ack_eiem, cpu));  in cpu_ack_irq()
     93  per_cpu(local_ack_eiem, cpu) |= mask;  in cpu_eoi_irq()
     96  set_eiem(cpu_eiem & per_cpu(local_ack_eiem, cpu));  in cpu_eoi_irq()
    148  #define irq_stats(x) (&per_cpu(irq_stat, x))
    340  return per_cpu(cpu_data, cpu).txn_addr;  in txn_affinity_addr()
    352  (!per_cpu(cpu_data, next_cpu).txn_addr ||  in txn_alloc_addr()
    428  last_usage = &per_cpu(irq_stat.irq_stack_usage, cpu);  in stack_overflow_check()
    444  last_usage = &per_cpu(irq_stat.kernel_stack_usage, cpu);  in stack_overflow_check()
    543  per_cpu(cpu_data, cpu).hpa);  in do_cpu_irq_mask()
    [all …]
/linux/arch/x86/kvm/vmx/
posted_intr.c
    124  spin_lock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu));  in __pi_post_block()
    126  spin_unlock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu));  in __pi_post_block()
    157  spin_lock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu));  in pi_pre_block()
    159  &per_cpu(blocked_vcpu_on_cpu,  in pi_pre_block()
    161  spin_unlock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu));  in pi_pre_block()
    218  spin_lock(&per_cpu(blocked_vcpu_on_cpu_lock, cpu));  in pi_wakeup_handler()
    219  list_for_each_entry(vcpu, &per_cpu(blocked_vcpu_on_cpu, cpu),  in pi_wakeup_handler()
    226  spin_unlock(&per_cpu(blocked_vcpu_on_cpu_lock, cpu));  in pi_wakeup_handler()
    231  INIT_LIST_HEAD(&per_cpu(blocked_vcpu_on_cpu, cpu));  in pi_init_cpu()
    232  spin_lock_init(&per_cpu(blocked_vcpu_on_cpu_lock, cpu));  in pi_init_cpu()
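pi_init_cpu() in the listing is the whole pattern in miniature: one wake-up list per CPU, each guarded by its own per-CPU spinlock so pi_wakeup_handler() on one CPU never contends with another. A sketch of the per-CPU declarations and initialisation (the helper name is illustrative):

#include <linux/percpu.h>
#include <linux/list.h>
#include <linux/spinlock.h>

static DEFINE_PER_CPU(struct list_head, blocked_vcpu_on_cpu);
static DEFINE_PER_CPU(spinlock_t, blocked_vcpu_on_cpu_lock);

/* called once per CPU at bring-up, mirroring pi_init_cpu() above */
static void example_pi_init_cpu(int cpu)
{
        INIT_LIST_HEAD(&per_cpu(blocked_vcpu_on_cpu, cpu));
        spin_lock_init(&per_cpu(blocked_vcpu_on_cpu_lock, cpu));
}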
/linux/arch/riscv/mm/
context.c
     52  if (per_cpu(reserved_context, cpu) == cntx) {  in check_update_reserved_context()
     54  per_cpu(reserved_context, cpu) = newcntx;  in check_update_reserved_context()
     74  cntx = atomic_long_xchg_relaxed(&per_cpu(active_context, i), 0);  in __flush_context()
     82  cntx = per_cpu(reserved_context, i);  in __flush_context()
     85  per_cpu(reserved_context, i) = cntx;  in __flush_context()
    169  old_active_cntx = atomic_long_read(&per_cpu(active_context, cpu));  in set_mm_asid()
    172  atomic_long_cmpxchg_relaxed(&per_cpu(active_context, cpu),  in set_mm_asid()
    188  atomic_long_set(&per_cpu(active_context, cpu), cntx);  in set_mm_asid()
/linux/arch/mips/kernel/
mips-cpc.c
     75  spin_lock_init(&per_cpu(cpc_core_lock, cpu));  in mips_cpc_probe()
     98  spin_lock_irqsave(&per_cpu(cpc_core_lock, curr_core),  in mips_cpc_lock_other()
     99  per_cpu(cpc_core_lock_flags, curr_core));  in mips_cpc_lock_other()
    118  spin_unlock_irqrestore(&per_cpu(cpc_core_lock, curr_core),  in mips_cpc_unlock_other()
    119  per_cpu(cpc_core_lock_flags, curr_core));  in mips_cpc_unlock_other()
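mips-cpc.c stores not only a per-CPU lock but also the saved IRQ flags in a per-CPU slot, which works because spin_lock_irqsave() is a macro and per_cpu() is an lvalue. A sketch of the lock/unlock pair (helper names are illustrative):

#include <linux/percpu.h>
#include <linux/spinlock.h>

static DEFINE_PER_CPU(spinlock_t, cpc_core_lock);
static DEFINE_PER_CPU(unsigned long, cpc_core_lock_flags);

static void example_lock_other(unsigned int curr_core)
{
        /* flags land in the target core's per-CPU slot, so the matching
         * unlock can find them without threading state through callers */
        spin_lock_irqsave(&per_cpu(cpc_core_lock, curr_core),
                          per_cpu(cpc_core_lock_flags, curr_core));
}

static void example_unlock_other(unsigned int curr_core)
{
        spin_unlock_irqrestore(&per_cpu(cpc_core_lock, curr_core),
                               per_cpu(cpc_core_lock_flags, curr_core));
}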
/linux/arch/x86/kernel/
setup_percpu.c
    224  per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);  in setup_per_cpu_areas()
    225  per_cpu(cpu_number, cpu) = cpu;  in setup_per_cpu_areas()
    235  per_cpu(x86_cpu_to_apicid, cpu) =  in setup_per_cpu_areas()
    237  per_cpu(x86_bios_cpu_apicid, cpu) =  in setup_per_cpu_areas()
    239  per_cpu(x86_cpu_to_acpiid, cpu) =  in setup_per_cpu_areas()
    243  per_cpu(x86_cpu_to_logical_apicid, cpu) =  in setup_per_cpu_areas()
    247  per_cpu(x86_cpu_to_node_map, cpu) =  in setup_per_cpu_areas()
/linux/drivers/xen/events/
events_2l.c
     52  clear_bit(evtchn, BM(per_cpu(cpu_evtchn_mask, cpu)));  in evtchn_2l_remove()
     58  clear_bit(evtchn, BM(per_cpu(cpu_evtchn_mask, old_cpu)));  in evtchn_2l_bind_to_cpu()
     59  set_bit(evtchn, BM(per_cpu(cpu_evtchn_mask, cpu)));  in evtchn_2l_bind_to_cpu()
    152  per_cpu(cpu_evtchn_mask, cpu)[idx] &  in active_evtchns()
    268  xen_ulong_t *cpu_evtchn = per_cpu(cpu_evtchn_mask, cpu);  in xen_debug_interrupt()
    280  v = per_cpu(xen_vcpu, i);  in xen_debug_interrupt()
    289  v = per_cpu(xen_vcpu, cpu);  in xen_debug_interrupt()
    353  memset(per_cpu(cpu_evtchn_mask, i), 0, sizeof(xen_ulong_t) *  in evtchn_2l_resume()
    359  memset(per_cpu(cpu_evtchn_mask, cpu), 0, sizeof(xen_ulong_t) *  in evtchn_2l_percpu_deinit()
/linux/arch/powerpc/include/asm/
smp.h
    111  return per_cpu(cpu_sibling_map, cpu);  in cpu_sibling_mask()
    116  return per_cpu(cpu_core_map, cpu);  in cpu_core_mask()
    121  return per_cpu(cpu_l2_cache_map, cpu);  in cpu_l2_cache_mask()
    126  return per_cpu(cpu_smallcore_map, cpu);  in cpu_smallcore_mask()
    140  return per_cpu(cpu_smallcore_map, cpu);  in cpu_smt_mask()
    142  return per_cpu(cpu_sibling_map, cpu);  in cpu_smt_mask()
/linux/arch/arm/kernel/
smp.c
    397  struct cpuinfo_arm *cpu_info = &per_cpu(cpu_data, cpuid);  in smp_store_cpu_info()
    487  bogosum += per_cpu(cpu_data, cpu).loops_per_jiffy;  in smp_cpus_done()
    622  per_cpu(cpu_completion, cpu) = completion;  in register_ipi_completion()
    628  complete(per_cpu(cpu_completion, cpu));  in ipi_complete()
    818  if (!per_cpu(l_p_j_ref, first)) {  in cpufreq_callback()
    820  per_cpu(l_p_j_ref, cpu) =  in cpufreq_callback()
    821  per_cpu(cpu_data, cpu).loops_per_jiffy;  in cpufreq_callback()
    822  per_cpu(l_p_j_ref_freq, cpu) = freq->old;  in cpufreq_callback()
    837  lpj = cpufreq_scale(per_cpu(l_p_j_ref, first),  in cpufreq_callback()
    838  per_cpu(l_p_j_ref_freq, first), freq->new);  in cpufreq_callback()
    [all …]
/linux/drivers/acpi/
cppc_acpi.c
     320  struct cpc_desc *desc = per_cpu(cpc_desc_ptr, i);  in send_pcc_cmd()
     415  cpc_ptr = per_cpu(cpc_desc_ptr, cpu);  in acpi_cpc_valid()
     442  cpc_ptr = per_cpu(cpc_desc_ptr, cpu);  in acpi_get_psd_map()
     464  match_cpc_ptr = per_cpu(cpc_desc_ptr, i);  in acpi_get_psd_map()
     806  per_cpu(cpc_desc_ptr, pr->id) = cpc_ptr;  in acpi_cppc_processor_probe()
     811  per_cpu(cpc_desc_ptr, pr->id) = NULL;  in acpi_cppc_processor_probe()
     861  cpc_ptr = per_cpu(cpc_desc_ptr, pr->id);  in acpi_cppc_processor_exit()
     917  int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);  in cpc_read()
     962  int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);  in cpc_write()
    1243  int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);  in cppc_set_perf()
    [all …]
/linux/arch/x86/include/asm/
topology.h
    117  #define topology_cluster_id(cpu) (per_cpu(cpu_l2c_id, cpu))
    118  #define topology_die_cpumask(cpu) (per_cpu(cpu_die_map, cpu))
    120  #define topology_core_cpumask(cpu) (per_cpu(cpu_core_map, cpu))
    121  #define topology_sibling_cpumask(cpu) (per_cpu(cpu_sibling_map, cpu))
    210  return per_cpu(arch_freq_scale, cpu);  in arch_scale_freq_capacity()
/linux/arch/x86/kernel/apic/
x2apic_cluster.c
     63  struct cluster_mask *cmsk = per_cpu(cluster_masks, cpu);  in __x2apic_send_IPI_mask()
    119  cmsk = per_cpu(cluster_masks, cpu);  in init_x2apic_ldr()
    134  if (per_cpu(cluster_masks, cpu))  in alloc_clustermask()
    158  if (!zalloc_cpumask_var(&per_cpu(ipi_mask, cpu), GFP_KERNEL))  in x2apic_prepare_cpu()
    165  struct cluster_mask *cmsk = per_cpu(cluster_masks, dead_cpu);  in x2apic_dead_cpu()
    169  free_cpumask_var(per_cpu(ipi_mask, dead_cpu));  in x2apic_dead_cpu()
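x2apic ties per-CPU cpumask_var_t management to CPU hotplug: allocate in the "prepare" callback, free in the "dead" callback. A sketch of the two hooks (names are illustrative; the listing's x2apic_prepare_cpu()/x2apic_dead_cpu() also manage cluster_masks):

#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/gfp.h>

static DEFINE_PER_CPU(cpumask_var_t, ipi_mask);

static int example_prepare_cpu(unsigned int cpu)
{
        /* zalloc so a freshly onlined CPU starts with an empty mask */
        if (!zalloc_cpumask_var(&per_cpu(ipi_mask, cpu), GFP_KERNEL))
                return -ENOMEM;
        return 0;
}

static int example_dead_cpu(unsigned int dead_cpu)
{
        free_cpumask_var(per_cpu(ipi_mask, dead_cpu));
        return 0;
}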