/xen/xen/common/

softirq.c
     85  unsigned int cpu, this_cpu = smp_processor_id();   in cpumask_raise_softirq() [local]
     88  if ( !per_cpu(batching, this_cpu) || in_irq() )   in cpumask_raise_softirq()
     94  raise_mask = &per_cpu(batch_mask, this_cpu);   in cpumask_raise_softirq()
     98  cpu != this_cpu &&   in cpumask_raise_softirq()
    108  unsigned int this_cpu = smp_processor_id();   in cpu_raise_softirq() [local]
    111  || (cpu == this_cpu)   in cpu_raise_softirq()
    115  if ( !per_cpu(batching, this_cpu) || in_irq() )   in cpu_raise_softirq()
    123  ++this_cpu(batching);   in cpu_raise_softirq_batch_begin()
    128  unsigned int cpu, this_cpu = smp_processor_id();   in cpu_raise_softirq_batch_finish() [local]
    131  ASSERT(per_cpu(batching, this_cpu));   in cpu_raise_softirq_batch_finish()
    [all …]

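The softirq.c hits show Xen's raise-batching idiom: cpu_raise_softirq_batch_begin() bumps a per-CPU batching depth, raises issued while a batch is open (and outside IRQ context) are collected in a per-CPU batch_mask rather than notified one by one, and cpu_raise_softirq_batch_finish() flushes the mask. A minimal user-space sketch of that shape; in real Xen the per-CPU variables live in a per-CPU data section reached through a per-CPU offset, so the fake_cpu index, fixed arrays, and notify_cpu() below are portable stand-ins, not the actual implementation:

    #include <stdbool.h>
    #include <stdio.h>

    #define NR_CPUS 4
    static unsigned int fake_cpu;                /* stand-in for smp_processor_id() */
    #define this_cpu(var) (var[fake_cpu])

    static unsigned int batching[NR_CPUS];       /* nesting depth of open batches */
    static unsigned long batch_mask[NR_CPUS];    /* CPUs owed a deferred notification */

    static bool in_irq(void) { return false; }   /* assume process context */
    static void notify_cpu(unsigned int cpu) { printf("IPI -> CPU%u\n", cpu); }

    static void cpu_raise_softirq(unsigned int cpu)
    {
        if ( !this_cpu(batching) || in_irq() )
            notify_cpu(cpu);                     /* unbatched: signal immediately */
        else
            this_cpu(batch_mask) |= 1UL << cpu;  /* batched: defer until finish */
    }

    static void cpu_raise_softirq_batch_begin(void) { ++this_cpu(batching); }

    static void cpu_raise_softirq_batch_finish(void)
    {
        for ( unsigned int cpu = 0; cpu < NR_CPUS; cpu++ )
            if ( this_cpu(batch_mask) & (1UL << cpu) )
                notify_cpu(cpu);                 /* flush all deferred raises */
        this_cpu(batch_mask) = 0;
        --this_cpu(batching);
    }

    int main(void)
    {
        cpu_raise_softirq_batch_begin();
        cpu_raise_softirq(1);
        cpu_raise_softirq(2);
        cpu_raise_softirq_batch_finish();        /* both IPIs sent together here */
        return 0;
    }
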
hypfs.c
     39  ASSERT(this_cpu(hypfs_locked) != hypfs_write_locked);   in hypfs_read_lock()
     42  this_cpu(hypfs_locked) = hypfs_read_locked;   in hypfs_read_lock()
     47  ASSERT(this_cpu(hypfs_locked) == hypfs_unlocked);   in hypfs_write_lock()
     50  this_cpu(hypfs_locked) = hypfs_write_locked;   in hypfs_write_lock()
     55  enum hypfs_lock_state locked = this_cpu(hypfs_locked);   in hypfs_unlock()
     57  this_cpu(hypfs_locked) = hypfs_unlocked;   in hypfs_unlock()
    218  ASSERT(this_cpu(hypfs_locked) != hypfs_unlocked);   in hypfs_read_dir()
    255  ASSERT(this_cpu(hypfs_locked) != hypfs_unlocked);   in hypfs_read_leaf()
    299  ASSERT(this_cpu(hypfs_locked) == hypfs_write_locked);   in hypfs_write_leaf()
    336  ASSERT(this_cpu(hypfs_locked) == hypfs_write_locked);   in hypfs_write_bool()
    [all …]

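hypfs.c pairs a global lock with a per-CPU record of how this CPU last took it, purely so entry points such as hypfs_read_dir() and hypfs_write_leaf() can ASSERT the caller's locking discipline. A compilable sketch of just that bookkeeping; assert() replaces ASSERT(), the actual rwlock calls are elided, and the per-CPU stand-ins are as in the previous sketch:

    #include <assert.h>

    #define NR_CPUS 4
    static unsigned int fake_cpu;
    #define this_cpu(var) (var[fake_cpu])

    enum hypfs_lock_state { hypfs_unlocked, hypfs_read_locked, hypfs_write_locked };
    static enum hypfs_lock_state hypfs_locked[NR_CPUS];

    static void hypfs_read_lock(void)
    {
        assert(this_cpu(hypfs_locked) != hypfs_write_locked);  /* no self-deadlock */
        /* read_lock(&hypfs_lock); */
        this_cpu(hypfs_locked) = hypfs_read_locked;
    }

    static void hypfs_write_lock(void)
    {
        assert(this_cpu(hypfs_locked) == hypfs_unlocked);
        /* write_lock(&hypfs_lock); */
        this_cpu(hypfs_locked) = hypfs_write_locked;
    }

    static void hypfs_unlock(void)
    {
        enum hypfs_lock_state locked = this_cpu(hypfs_locked);

        this_cpu(hypfs_locked) = hypfs_unlocked;
        (void)locked;  /* the real code drops the rwlock according to 'locked' */
    }

    int main(void)
    {
        hypfs_write_lock();
        hypfs_unlock();
        hypfs_read_lock();
        hypfs_unlock();
        return 0;
    }
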
random.c
     13  unsigned int next = this_cpu(seed), val = arch_get_random();   in get_random()
     29  this_cpu(seed) = next;   in get_random()

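get_random() keeps independent generator state per CPU, so the hot path needs no locking. The listing shows only the load and store of this_cpu(seed); the sketch below fills in the middle with a plain LCG step, which is an illustrative assumption — the real code prefers arch_get_random() when a hardware source exists:

    #include <stdio.h>

    #define NR_CPUS 4
    static unsigned int fake_cpu;
    #define this_cpu(var) (var[fake_cpu])

    static unsigned int seed[NR_CPUS];

    static unsigned int get_random(void)
    {
        unsigned int next = this_cpu(seed), val = 0;  /* assume no HW source */

        if ( !val )                          /* software fallback path */
        {
            next = next * 1103515245 + 12345;  /* illustrative LCG step */
            val = next >> 16;
        }

        this_cpu(seed) = next;               /* state stays CPU-local */
        return val;
    }

    int main(void)
    {
        this_cpu(seed) = 42;
        printf("%u %u\n", get_random(), get_random());
        return 0;
    }
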
trace.c
    653  ed.lost_records = this_cpu(lost_records);   in insert_lost_records()
    654  ed.first_tsc = this_cpu(lost_records_first_tsc);   in insert_lost_records()
    656  this_cpu(lost_records) = 0;   in insert_lost_records()
    724  spin_lock_irqsave(&this_cpu(t_lock), flags);   in __trace_var()
    726  buf = this_cpu(t_bufs);   in __trace_var()
    754  if ( this_cpu(lost_records) )   in __trace_var()
    778  if ( ++this_cpu(lost_records) == 1 )   in __trace_var()
    779  this_cpu(lost_records_first_tsc)=(u64)get_cycles();   in __trace_var()
    789  if ( this_cpu(lost_records) )   in __trace_var()
    811  spin_unlock_irqrestore(&this_cpu(t_lock), flags);   in __trace_var()

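The trace.c pattern: when the per-CPU trace buffer has no room, the record is dropped, this_cpu(lost_records) is bumped, and the TSC of the first drop is latched; the next successful write first emits a summary record and clears the counter. A simplified sketch — no real ring buffer, a stubbed get_cycles(), per-CPU stand-ins as before:

    #include <stdint.h>
    #include <stdio.h>

    #define NR_CPUS 4
    static unsigned int fake_cpu;
    #define this_cpu(var) (var[fake_cpu])

    static uint32_t lost_records[NR_CPUS];
    static uint64_t lost_records_first_tsc[NR_CPUS];

    static uint64_t get_cycles(void) { return 123456789ULL; }  /* stub */

    static void insert_lost_records(void)
    {
        printf("lost %u records since tsc %llu\n",
               (unsigned)this_cpu(lost_records),
               (unsigned long long)this_cpu(lost_records_first_tsc));
        this_cpu(lost_records) = 0;
    }

    static void trace_var(int buffer_full)
    {
        if ( buffer_full )
        {
            /* Latch the timestamp of the *first* drop only. */
            if ( ++this_cpu(lost_records) == 1 )
                this_cpu(lost_records_first_tsc) = get_cycles();
            return;
        }

        if ( this_cpu(lost_records) )
            insert_lost_records();  /* account for the gap before new data */

        /* ... write the record into the buffer ... */
    }

    int main(void)
    {
        trace_var(1);               /* full: drop and count */
        trace_var(1);
        trace_var(0);               /* space again: emit the summary first */
        return 0;
    }
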
rcupdate.c
    284  rdp = &this_cpu(rcu_data);   in call_rcu()
    471  struct rcu_data *rdp = &this_cpu(rcu_data);   in rcu_process_callbacks()
    536  struct rcu_data *rdp = &this_cpu(rcu_data);   in rcu_idle_timer_start()
    552  struct rcu_data *rdp = &this_cpu(rcu_data);   in rcu_idle_timer_stop()
    592  struct rcu_data *rdp = &this_cpu(rcu_data);   in rcu_check_callbacks()
    657  rcu_offline_cpu(&this_cpu(rcu_data), &rcu_ctrlblk, rdp);   in cpu_callback()

/xen/xen/arch/x86/genapic/

x2apic.c
     44  unsigned int cpu, this_cpu = smp_processor_id();   in init_apic_ldr_x2apic_cluster() [local]
     46  per_cpu(cpu_2_logical_apicid, this_cpu) = apic_read(APIC_LDR);   in init_apic_ldr_x2apic_cluster()
     48  if ( per_cpu(cluster_cpus, this_cpu) )   in init_apic_ldr_x2apic_cluster()
     50  ASSERT(cpumask_test_cpu(this_cpu, per_cpu(cluster_cpus, this_cpu)));   in init_apic_ldr_x2apic_cluster()
     54  per_cpu(cluster_cpus, this_cpu) = cluster_cpus_spare;   in init_apic_ldr_x2apic_cluster()
     57  if (this_cpu == cpu || x2apic_cluster(this_cpu) != x2apic_cluster(cpu))   in init_apic_ldr_x2apic_cluster()
     59  per_cpu(cluster_cpus, this_cpu) = per_cpu(cluster_cpus, cpu);   in init_apic_ldr_x2apic_cluster()
     62  if ( per_cpu(cluster_cpus, this_cpu) == cluster_cpus_spare )   in init_apic_ldr_x2apic_cluster()
     65  cpumask_set_cpu(this_cpu, per_cpu(cluster_cpus, this_cpu));   in init_apic_ldr_x2apic_cluster()
    258  if ( !this_cpu(cluster_cpus) )   in apic_x2apic_probe()

/xen/xen/include/xen/

perfc.h
     57  #define perfc_value(x) this_cpu(perfcounters)[PERFC_ ## x]
     60  this_cpu(perfcounters)[PERFC_ ## x + (y)] : 0 )
     61  #define perfc_set(x,v) (this_cpu(perfcounters)[PERFC_ ## x] = (v))
     64  this_cpu(perfcounters)[PERFC_ ## x + (y)] = (v) : (v) )
     65  #define perfc_incr(x) (++this_cpu(perfcounters)[PERFC_ ## x])
     66  #define perfc_decr(x) (--this_cpu(perfcounters)[PERFC_ ## x])
     69  ++this_cpu(perfcounters)[PERFC_ ## x + (y)] : 0 )
     70  #define perfc_add(x,v) (this_cpu(perfcounters)[PERFC_ ## x] += (v))
     73  this_cpu(perfcounters)[PERFC_ ## x + (y)] = (v) : (v) )

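All perf counters share one per-CPU array, with PERFC_<name> enumerators as indices, so perfc_incr() compiles down to a single increment of this CPU's slot. A self-contained sketch using the same macro shapes as the header; the counter names and the per-CPU plumbing below are illustrative stand-ins:

    #include <stdio.h>

    #define NR_CPUS 4
    static unsigned int fake_cpu;
    #define this_cpu(var) (var[fake_cpu])

    enum { PERFC_irqs, PERFC_hypercalls, NR_PERFCTRS };
    static unsigned long perfcounters[NR_CPUS][NR_PERFCTRS];

    #define perfc_value(x)  this_cpu(perfcounters)[PERFC_ ## x]
    #define perfc_set(x, v) (this_cpu(perfcounters)[PERFC_ ## x] = (v))
    #define perfc_incr(x)   (++this_cpu(perfcounters)[PERFC_ ## x])
    #define perfc_decr(x)   (--this_cpu(perfcounters)[PERFC_ ## x])

    int main(void)
    {
        perfc_incr(irqs);
        perfc_incr(irqs);
        perfc_set(hypercalls, 10);
        printf("irqs=%lu hypercalls=%lu\n",
               perfc_value(irqs), perfc_value(hypercalls));
        return 0;
    }
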
rcupdate.h
     48  this_cpu(rcu_lock_cnt)++;   in rcu_quiesce_disable()
     55  this_cpu(rcu_lock_cnt)--;   in rcu_quiesce_enable()
     61  return !this_cpu(rcu_lock_cnt);   in rcu_quiesce_allowed()

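rcupdate.h tracks read-side critical sections with a bare per-CPU nesting counter: quiescence is allowed only when this CPU's count is zero. Because the counter is only ever touched by its own CPU, plain ++/-- suffice. The whole idiom, transcribed onto stand-in per-CPU plumbing:

    #include <assert.h>

    #define NR_CPUS 4
    static unsigned int fake_cpu;
    #define this_cpu(var) (var[fake_cpu])

    static unsigned int rcu_lock_cnt[NR_CPUS];

    static void rcu_quiesce_disable(void) { this_cpu(rcu_lock_cnt)++; }
    static void rcu_quiesce_enable(void)  { this_cpu(rcu_lock_cnt)--; }
    static int  rcu_quiesce_allowed(void) { return !this_cpu(rcu_lock_cnt); }

    int main(void)
    {
        rcu_quiesce_disable();           /* enter a read-side section */
        rcu_quiesce_disable();           /* sections nest */
        rcu_quiesce_enable();
        assert(!rcu_quiesce_allowed());  /* still one level deep */
        rcu_quiesce_enable();
        assert(rcu_quiesce_allowed());
        return 0;
    }
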
/xen/xen/arch/x86/guest/hyperv/

hyperv.c
    133  if ( this_cpu(hv_input_page) )   in setup_hypercall_pcpu_arg()
    136  this_cpu(hv_input_page) = alloc_xenheap_page();   in setup_hypercall_pcpu_arg()
    137  if ( !this_cpu(hv_input_page) )   in setup_hypercall_pcpu_arg()
    145  this_cpu(hv_vp_index) = vp_index_msr;   in setup_hypercall_pcpu_arg()
    157  if ( !this_cpu(hv_vp_assist) )   in setup_vp_assist()
    159  this_cpu(hv_vp_assist) = alloc_xenheap_page();   in setup_vp_assist()
    160  if ( !this_cpu(hv_vp_assist) )   in setup_vp_assist()
    167  clear_page(this_cpu(hv_vp_assist));   in setup_vp_assist()
    171  msr.pfn = virt_to_mfn(this_cpu(hv_vp_assist));   in setup_vp_assist()
    216  if ( !hcall_page_ready || !this_cpu(hv_input_page) )   in flush_tlb()

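setup_hypercall_pcpu_arg() is a per-CPU setup hook, and such hooks must be idempotent across CPU offline/online — hence the early return when this CPU's page already exists. A sketch of the shape, with calloc() standing in for alloc_xenheap_page():

    #include <errno.h>
    #include <stdlib.h>

    #define NR_CPUS 4
    static unsigned int fake_cpu;
    #define this_cpu(var) (var[fake_cpu])

    static void *hv_input_page[NR_CPUS];

    static int setup_hypercall_pcpu_arg(void)
    {
        if ( this_cpu(hv_input_page) )
            return 0;                  /* CPU came back online: page still there */

        this_cpu(hv_input_page) = calloc(1, 4096);  /* alloc_xenheap_page() stand-in */
        if ( !this_cpu(hv_input_page) )
            return -ENOMEM;

        return 0;
    }

    int main(void)
    {
        /* Second call exercises the idempotent path. */
        return setup_hypercall_pcpu_arg() || setup_hypercall_pcpu_arg();
    }
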
/xen/xen/arch/arm/

processor.c
     31  this_cpu(processor) = procinfo->processor;   in processor_setup()
     36  if ( !this_cpu(processor) || !this_cpu(processor)->vcpu_initialise )   in processor_vcpu_initialise()
     39  this_cpu(processor)->vcpu_initialise(v);   in processor_vcpu_initialise()

gic-vgic.c
     28  #define lr_all_full() (this_cpu(lr_mask) == ((1 << gic_get_nr_lrs()) - 1))
    114  unsigned long *lr_mask = (unsigned long *) &this_cpu(lr_mask);   in gic_find_unused_lr()
    154  set_bit(i, &this_cpu(lr_mask));   in gic_raise_guest_irq()
    187  clear_bit(i, &this_cpu(lr_mask));   in gic_update_one_lr()
    222  clear_bit(i, &this_cpu(lr_mask));   in gic_update_one_lr()
    268  while ((i = find_next_bit((const unsigned long *) &this_cpu(lr_mask),   in vgic_sync_from_lrs()
    322  set_bit(lr, &this_cpu(lr_mask));   in gic_restore_pending_irqs()

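gic-vgic.c tracks which of this CPU's hardware list registers (LRs) are occupied with a per-CPU bitmask: set a bit when an interrupt is queued into that LR, clear it when the guest is done with it, scan for a zero bit to find a free slot. A sketch assuming 4 LRs; gic_claim_lr()/gic_release_lr() are illustrative names for the set_bit/clear_bit call sites above:

    #define NR_CPUS 4
    static unsigned int fake_cpu;
    #define this_cpu(var) (var[fake_cpu])

    static unsigned long lr_mask[NR_CPUS];   /* one bit per occupied LR */

    static unsigned int gic_get_nr_lrs(void) { return 4; }  /* assume 4 LRs */

    #define lr_all_full() (this_cpu(lr_mask) == ((1UL << gic_get_nr_lrs()) - 1))

    static int gic_find_unused_lr(void)
    {
        for ( unsigned int i = 0; i < gic_get_nr_lrs(); i++ )
            if ( !(this_cpu(lr_mask) & (1UL << i)) )
                return i;
        return -1;                       /* callers check lr_all_full() first */
    }

    static void gic_claim_lr(unsigned int i)   { this_cpu(lr_mask) |=  1UL << i; }
    static void gic_release_lr(unsigned int i) { this_cpu(lr_mask) &= ~(1UL << i); }

    int main(void)
    {
        int lr = gic_find_unused_lr();

        if ( lr >= 0 )
            gic_claim_lr(lr);            /* queue an interrupt into this LR */
        return lr_all_full();
    }
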
/xen/xen/arch/x86/guest/xen/

xen.c
     97  unsigned int vcpu = this_cpu(vcpu_id);   in map_vcpuinfo()
    103  this_cpu(vcpu_info) = &XEN_shared_info->vcpu_info[vcpu];   in map_vcpuinfo()
    109  this_cpu(vcpu_info) = &vcpu_info[vcpu];   in map_vcpuinfo()
    118  this_cpu(vcpu_info) = &vcpu_info[vcpu];   in map_vcpuinfo()
    124  this_cpu(vcpu_info) = &XEN_shared_info->vcpu_info[vcpu];   in map_vcpuinfo()
    139  this_cpu(vcpu_id) = ebx;   in set_vcpu_id()
    141  this_cpu(vcpu_id) = smp_processor_id();   in set_vcpu_id()
    175  struct vcpu_info *vcpu_info = this_cpu(vcpu_info);   in xen_evtchn_upcall()
    215  rc = xen_hypercall_set_evtchn_upcall_vector(this_cpu(vcpu_id),   in init_evtchn()

/xen/xen/arch/x86/acpi/

cpuidle_menu.c
    149  struct menu_device *data = &this_cpu(menu_devices);   in avg_intr_interval_us()
    158  irq_sum = (data->pf.irq_sum + (this_cpu(irq_count) - data->pf.irq_count_stamp)   in avg_intr_interval_us()
    170  data->pf.irq_count_stamp= this_cpu(irq_count);   in avg_intr_interval_us()
    179  s_time_t us = (this_cpu(timer_deadline) - NOW()) / 1000;   in get_sleep_length_us()
    190  struct menu_device *data = &this_cpu(menu_devices);   in menu_select()
    242  struct menu_device *data = &this_cpu(menu_devices);   in menu_reflect()
    297  const struct menu_device *data = &this_cpu(menu_devices);   in menu_get_trace_data()

/xen/xen/arch/x86/

nmi.c
    154  unsigned int start_count = this_cpu(nmi_count);   in wait_for_nmis()
    161  if ( this_cpu(nmi_count) >= start_count + 2 )   in wait_for_nmis()
    216  this_cpu(nmi_timer_ticks)++;   in nmi_timer_fn()
    217  set_timer(&this_cpu(nmi_timer), NOW() + MILLISECS(1000));   in nmi_timer_fn()
    497  unsigned int sum = this_cpu(nmi_timer_ticks);   in nmi_watchdog_tick()
    499  if ( (this_cpu(last_irq_sums) == sum) && watchdog_enabled() )   in nmi_watchdog_tick()
    505  this_cpu(alert_counter)++;   in nmi_watchdog_tick()
    506  if ( this_cpu(alert_counter) == opt_watchdog_timeout*nmi_hz )   in nmi_watchdog_tick()
    516  this_cpu(last_irq_sums) = sum;   in nmi_watchdog_tick()
    517  this_cpu(alert_counter) = 0;   in nmi_watchdog_tick()

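nmi_watchdog_tick() detects a wedged CPU by comparing a per-CPU tick counter (bumped from an ordinary timer) against the snapshot taken at the previous NMI: if the counter has not moved for enough consecutive NMIs, interrupts are presumed stuck. A sketch with a fixed WATCHDOG_TIMEOUT standing in for opt_watchdog_timeout * nmi_hz, and the watchdog_enabled() check dropped:

    #include <stdio.h>

    #define NR_CPUS 4
    static unsigned int fake_cpu;
    #define this_cpu(var) (var[fake_cpu])

    static unsigned int nmi_timer_ticks[NR_CPUS];  /* bumped by a regular timer */
    static unsigned int last_irq_sums[NR_CPUS];    /* snapshot at the previous NMI */
    static unsigned int alert_counter[NR_CPUS];    /* consecutive stalled NMIs */

    #define WATCHDOG_TIMEOUT 5                     /* assumed threshold, in NMIs */

    static void nmi_watchdog_tick(void)
    {
        unsigned int sum = this_cpu(nmi_timer_ticks);

        if ( this_cpu(last_irq_sums) == sum )
        {
            /* No timer progress since the last NMI: count towards a stall. */
            if ( ++this_cpu(alert_counter) == WATCHDOG_TIMEOUT )
                printf("CPU%u: watchdog timeout\n", fake_cpu);
        }
        else
        {
            this_cpu(last_irq_sums) = sum;         /* progress: rearm */
            this_cpu(alert_counter) = 0;
        }
    }

    int main(void)
    {
        for ( int i = 0; i < WATCHDOG_TIMEOUT; i++ )
            nmi_watchdog_tick();                   /* ticks never move: fires once */
        return 0;
    }
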
crash.c
     49  if ( !this_cpu(crash_save_done) )   in do_nmi_crash()
     70  this_cpu(crash_save_done) = true;   in do_nmi_crash()

xstate.c
     62  this_cpu(xcr0) = xfeatures;   in set_xcr0()
     68  return this_cpu(xcr0);   in get_xcr0()
     76  u64 *this_xss = &this_cpu(xss);   in set_msr_xss()
     87  return this_cpu(xss);   in get_msr_xss()
    460  ptr->xsave_hdr.xcomp_bv &= this_cpu(xcr0) | this_cpu(xss);   in xrstor()
    466  ptr->xsave_hdr.xstate_bv &= this_cpu(xcr0);   in xrstor()
    797  unsigned long xcr0 = this_cpu(xcr0);   in xstate_set_init()

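xstate.c mirrors the last value written to XCR0 (and MSR_XSS) in a per-CPU variable, so get_xcr0() is a memory load instead of an xgetbv, and xrstor() can mask state bitmaps against the cached values. A sketch; hw_xsetbv() stands in for the privileged instruction:

    #include <stdint.h>

    #define NR_CPUS 4
    static unsigned int fake_cpu;
    #define this_cpu(var) (var[fake_cpu])

    static uint64_t xcr0[NR_CPUS];      /* shadow of the last XCR0 written */

    static void hw_xsetbv(uint64_t val) { (void)val; /* xsetbv in real code */ }

    static void set_xcr0(uint64_t xfeatures)
    {
        hw_xsetbv(xfeatures);
        this_cpu(xcr0) = xfeatures;     /* keep the shadow in sync */
    }

    static uint64_t get_xcr0(void)
    {
        return this_cpu(xcr0);          /* memory load: no xgetbv */
    }

    int main(void)
    {
        set_xcr0(0x7);                  /* e.g. x87 | SSE | AVX */
        return get_xcr0() != 0x7;
    }
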
extable.c
     93  unsigned long stub = this_cpu(stubs.addr);   in search_exception_table()
    147  unsigned long addr = this_cpu(stubs.addr) + STUB_BUF_SIZE / 2;   in stub_selftest()
    155  uint8_t *ptr = map_domain_page(_mfn(this_cpu(stubs.mfn))) +   in stub_selftest()

/xen/xen/include/asm-arm/

current.h
     19  #define current (this_cpu(curr_vcpu))
     53  #define get_processor_id() this_cpu(cpu_id)
     57  this_cpu(cpu_id) = (id); \

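asm-arm/current.h makes current look like a global variable while actually expanding to this CPU's curr_vcpu slot, so every read or write of it implicitly goes through per-CPU storage. A sketch; struct vcpu and set_current() here are illustrative stand-ins:

    #define NR_CPUS 4
    static unsigned int fake_cpu;
    #define this_cpu(var) (var[fake_cpu])

    struct vcpu { int vcpu_id; };
    static struct vcpu *curr_vcpu[NR_CPUS];

    #define current           (this_cpu(curr_vcpu))
    #define set_current(vcpu) (this_cpu(curr_vcpu) = (vcpu))

    int main(void)
    {
        static struct vcpu v0 = { .vcpu_id = 0 };

        set_current(&v0);               /* a context switch would do this */
        return current != &v0;          /* "current" reads the same slot back */
    }
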
guest_atomics.h
     29  if ( name##_timeout(nr, p, this_cpu(guest_safe_atomic_max)) ) \
     48  this_cpu(guest_safe_atomic_max)); \
     81  if ( clear_mask16_timeout(mask, p, this_cpu(guest_safe_atomic_max)) )   in guest_bitop()
    100  this_cpu(guest_safe_atomic_max)) )   in __guest_cmpxchg()

/xen/xen/arch/x86/cpu/

common.c
    118  static const struct cpu_dev *this_cpu = &default_cpu;   [variable]
    143  this_cpu(msr_misc_features)))   in probe_cpuid_faulting()
    158  uint64_t *this_misc_features = &this_cpu(msr_misc_features);   in set_cpuid_faulting()
    305  case X86_VENDOR_INTEL: this_cpu = &intel_cpu_dev; break;   in early_cpu_init()
    306  case X86_VENDOR_AMD: this_cpu = &amd_cpu_dev; break;   in early_cpu_init()
    382  if (this_cpu->c_early_init)   in generic_identify()
    383  this_cpu->c_early_init(c);   in generic_identify()
    474  if (this_cpu->c_init)   in identify_cpu()
    475  this_cpu->c_init(c);   in identify_cpu()
    745  struct tss_page *tss_page = &this_cpu(tss_page);   in load_system_tables()
    [all …]

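Note the hit at line 118: in cpu/common.c, this_cpu is also a plain file-scope variable (the vendor's struct cpu_dev hooks), coexisting with uses of the this_cpu() per-CPU macro in the same file. This works because a function-like macro is only expanded where the name is immediately followed by '(' — worth knowing before grepping or renaming. A small demonstration of the preprocessor behaviour, with a stand-in macro:

    /* stand-in function-like per-CPU accessor, as in the other sketches */
    #define this_cpu(var) (per_cpu_##var[0])

    static unsigned long per_cpu_msr_misc_features[1];

    struct cpu_dev { void (*c_init)(void); };
    static const struct cpu_dev default_cpu;               /* zero-initialised */
    static const struct cpu_dev *this_cpu = &default_cpu;  /* no '(' follows: not expanded */

    static void demo(void)
    {
        unsigned long v = this_cpu(msr_misc_features);     /* '(' follows: macro expands */

        if ( this_cpu->c_init )                            /* plain variable again */
            this_cpu->c_init();
        (void)v;
    }

    int main(void)
    {
        demo();
        return 0;
    }
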
/xen/xen/arch/x86/hvm/

asid.c
     65  struct hvm_asid_data *data = &this_cpu(hvm_asid_data);   in hvm_asid_init()
     97  struct hvm_asid_data *data = &this_cpu(hvm_asid_data);   in hvm_asid_flush_core()
    116  struct hvm_asid_data *data = &this_cpu(hvm_asid_data);   in hvm_asid_handle_vmenter()

/xen/xen/include/asm-x86/

ldt.h
     16  desc = (!is_pv_32bit_vcpu(v) ? this_cpu(gdt) : this_cpu(compat_gdt))   in load_LDT()

msr.h
    245  return this_cpu(efer);   in read_efer()
    250  this_cpu(efer) = val;   in write_efer()
    261  uint32_t *this_tsc_aux = &this_cpu(tsc_aux);   in wrmsr_tsc_aux()

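msr.h shadows slow MSRs in per-CPU variables: read_efer() never executes rdmsr, and wrmsr_tsc_aux() skips the wrmsr when the shadow already holds the requested value (MSR writes are serialising, so the comparison is cheap by contrast). A sketch with a stubbed wrmsr_hw():

    #include <stdint.h>

    #define NR_CPUS 4
    static unsigned int fake_cpu;
    #define this_cpu(var) (var[fake_cpu])

    #define MSR_EFER    0xc0000080U
    #define MSR_TSC_AUX 0xc0000103U

    static uint64_t efer[NR_CPUS];      /* shadow of the EFER MSR */
    static uint32_t tsc_aux[NR_CPUS];   /* shadow of MSR_TSC_AUX */

    static void wrmsr_hw(uint32_t msr, uint64_t val) { (void)msr; (void)val; }

    static uint64_t read_efer(void)
    {
        return this_cpu(efer);          /* shadow read: no rdmsr */
    }

    static void write_efer(uint64_t val)
    {
        this_cpu(efer) = val;           /* keep the shadow in sync */
        wrmsr_hw(MSR_EFER, val);
    }

    static void wrmsr_tsc_aux(uint32_t val)
    {
        uint32_t *this_tsc_aux = &this_cpu(tsc_aux);

        if ( *this_tsc_aux != val )     /* lazy: skip redundant serialising writes */
        {
            wrmsr_hw(MSR_TSC_AUX, val);
            *this_tsc_aux = val;
        }
    }

    int main(void)
    {
        write_efer(read_efer() | 1);    /* e.g. set EFER.SCE */
        wrmsr_tsc_aux(3);
        wrmsr_tsc_aux(3);               /* shadow matches: no second wrmsr */
        return 0;
    }
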
/xen/xen/arch/x86/cpu/microcode/

core.c
    377  unsigned int primary = cpumask_first(this_cpu(cpu_sibling_mask));   in microcode_nmi_callback()
    397  this_cpu(loading_err) = ret;   in microcode_nmi_callback()
    418  this_cpu(cpu_sig).rev =   in secondary_thread_fn()
    419  per_cpu(cpu_sig, cpumask_first(this_cpu(cpu_sibling_mask))).rev;   in secondary_thread_fn()
    421  return this_cpu(loading_err);   in secondary_thread_fn()
    441  return this_cpu(loading_err);   in primary_thread_fn()
    544  else if ( cpu == cpumask_first(this_cpu(cpu_sibling_mask)) )   in do_microcode_update()

/xen/xen/arch/x86/cpu/mcheck/

non-fatal.c
     41  mctc = mcheck_mca_logout(MCA_POLLER, this_cpu(poll_bankmask),   in mce_checkregs()
     97  if (!this_cpu(poll_bankmask))   in init_nonfatal_mce_checker()