/linux/arch/x86/platform/uv/uv_time.c
    50:  int next_cpu;  (member)
    159: head->next_cpu = -1;  in uv_rtc_allocate_timers()
    176: head->next_cpu = -1;  in uv_rtc_find_next_timer()
    185: head->next_cpu = bcpu;  in uv_rtc_find_next_timer()
    209: int next_cpu;  in uv_rtc_set_timer() (local)
    213: next_cpu = head->next_cpu;  in uv_rtc_set_timer()
    217: if (next_cpu < 0 || bcpu == next_cpu ||  in uv_rtc_set_timer()
    218:     expires < head->cpu[next_cpu].expires) {  in uv_rtc_set_timer()
    219: head->next_cpu = bcpu;  in uv_rtc_set_timer()
    249: if ((head->next_cpu == bcpu && uv_read_rtc(NULL) >= *t) || force)  in uv_rtc_unset_timer()
    [all …]
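Taken on their own, the uv_time.c hits suggest that each UV RTC timer head remembers which CPU owns the earliest pending expiry, so queueing a sooner timer only has to update that one slot (lines 217-219). Below is a minimal, self-contained model of that bookkeeping; the struct layout and the set_timer() helper are invented for illustration, and only the comparison is modeled on the lines shown:

    #include <stdint.h>
    #include <stdio.h>

    #define NCPUS 4

    /* Hypothetical per-node timer head: one expiry slot per CPU plus the
     * index of the CPU whose expiry is earliest (-1 when nothing is armed). */
    struct timer_head {
        int next_cpu;
        struct {
            uint64_t expires;
        } cpu[NCPUS];
    };

    /* Arm 'cpu' for 'expires' and update next_cpu if this expiry is now the
     * earliest, mirroring the test visible at uv_time.c lines 217-219. */
    static void set_timer(struct timer_head *head, int cpu, uint64_t expires)
    {
        head->cpu[cpu].expires = expires;
        if (head->next_cpu < 0 || cpu == head->next_cpu ||
            expires < head->cpu[head->next_cpu].expires)
            head->next_cpu = cpu;
    }

    int main(void)
    {
        struct timer_head head = { .next_cpu = -1 };

        set_timer(&head, 2, 1000);
        set_timer(&head, 0, 500);
        printf("earliest timer owned by CPU %d\n", head.next_cpu); /* 0 */
        return 0;
    }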
/linux/tools/testing/selftests/bpf/test_lru_map.c
    163: int next_cpu = 0;  in test_lru_sanity0() (local)
    168: assert(sched_next_online(0, &next_cpu) != -1);  in test_lru_sanity0()
    262: int next_cpu = 0;  in test_lru_sanity1() (local)
    271: assert(sched_next_online(0, &next_cpu) != -1);  in test_lru_sanity1()
    339: int next_cpu = 0;  in test_lru_sanity2() (local)
    446: int next_cpu = 0;  in test_lru_sanity3() (local)
    509: int next_cpu = 0;  in test_lru_sanity4() (local)
    584: int next_cpu = 0;  in test_lru_sanity5() (local)
    633: int next_cpu = 0;  in test_lru_sanity6() (local)
    699: int next_cpu = 0;  in test_lru_sanity7() (local)
    [all …]

/linux/tools/testing/selftests/bpf/bench.c
    319: static int next_cpu(struct cpu_set *cpu_set)  in next_cpu() (function)
    325: for (i = cpu_set->next_cpu; i < cpu_set->cpus_len; i++) {  in next_cpu()
    327:     cpu_set->next_cpu = i + 1;  in next_cpu()
    335: return cpu_set->next_cpu++;  in next_cpu()
    443: next_cpu(&env.cons_cpus));  in setup_benchmark()
    450: env.prod_cpus.next_cpu = env.cons_cpus.next_cpu;  in setup_benchmark()
    462: next_cpu(&env.prod_cpus));  in setup_benchmark()

/linux/tools/testing/selftests/bpf/bench.h
    17:  int next_cpu;  (member)
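From the bench.c and bench.h hits, struct cpu_set appears to pair an optional CPU allow-list with a cursor (the next_cpu member at bench.h line 17) that remembers where the previous pick stopped. A self-contained sketch of that cursor pattern follows; the cpus/cpus_len fields are assumed from the loop at lines 325-335, and the exhausted-list case is handled here by returning -1 rather than whatever the real tool does:

    #include <stdbool.h>
    #include <stdio.h>

    /* Assumed shape of the consumer/producer CPU set: an optional allow-list
     * plus a cursor remembering where the last pick stopped. */
    struct cpu_set {
        const bool *cpus;   /* NULL means "any CPU, hand them out in order" */
        int cpus_len;
        int next_cpu;
    };

    /* Return the next CPU to pin a thread on, advancing the cursor; roughly
     * the pattern visible at bench.c lines 319-335. */
    static int next_cpu(struct cpu_set *cpu_set)
    {
        int i;

        if (cpu_set->cpus) {
            for (i = cpu_set->next_cpu; i < cpu_set->cpus_len; i++) {
                if (cpu_set->cpus[i]) {
                    cpu_set->next_cpu = i + 1;
                    return i;
                }
            }
            return -1; /* allow-list exhausted */
        }
        return cpu_set->next_cpu++;
    }

    int main(void)
    {
        const bool allowed[] = { false, true, false, true };
        struct cpu_set set = { .cpus = allowed, .cpus_len = 4 };

        printf("%d\n", next_cpu(&set)); /* 1 */
        printf("%d\n", next_cpu(&set)); /* 3 */
        printf("%d\n", next_cpu(&set)); /* -1 */
        return 0;
    }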
/linux/arch/parisc/kernel/irq.c
    346: static int next_cpu = -1;  in txn_alloc_addr() (local)
    348: next_cpu++; /* assign to "next" CPU we want this bugger on */  in txn_alloc_addr()
    351: while ((next_cpu < nr_cpu_ids) &&  in txn_alloc_addr()
    352:     (!per_cpu(cpu_data, next_cpu).txn_addr ||  in txn_alloc_addr()
    353:     !cpu_online(next_cpu)))  in txn_alloc_addr()
    354:     next_cpu++;  in txn_alloc_addr()
    356: if (next_cpu >= nr_cpu_ids)  in txn_alloc_addr()
    357:     next_cpu = 0; /* nothing else, assign monarch */  in txn_alloc_addr()
    359: return txn_affinity_addr(virt_irq, next_cpu);  in txn_alloc_addr()
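The parisc hits show a round-robin allocator: a function-local static cursor advances past CPUs that are offline or lack a txn_addr, and wraps to CPU 0 (the "monarch") when it runs off the end. A userspace model of that pattern, with the per-CPU usability test reduced to an invented cpu_usable[] array:

    #include <stdbool.h>
    #include <stdio.h>

    #define NR_CPUS 4

    /* Stand-in for the kernel's cpu_online()/txn_addr checks; the values
     * are made up for this model (CPU 1 is "unusable"). */
    static bool cpu_usable[NR_CPUS] = { true, false, true, true };

    /* Round-robin pick of the next usable CPU; the static cursor makes
     * consecutive allocations land on different CPUs, and CPU 0 is the
     * fallback when nothing else qualifies. */
    static int pick_next_cpu(void)
    {
        static int next_cpu = -1;

        next_cpu++;
        while (next_cpu < NR_CPUS && !cpu_usable[next_cpu])
            next_cpu++;
        if (next_cpu >= NR_CPUS)
            next_cpu = 0;
        return next_cpu;
    }

    int main(void)
    {
        for (int i = 0; i < 5; i++)
            printf("irq %d -> cpu %d\n", i, pick_next_cpu()); /* 0 2 3 0 2 */
        return 0;
    }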
/linux/kernel/trace/trace_hwlat.c
    318: int next_cpu;  in move_to_next_cpu() (local)
    330: next_cpu = cpumask_next(raw_smp_processor_id(), current_mask);  in move_to_next_cpu()
    333: if (next_cpu >= nr_cpu_ids)  in move_to_next_cpu()
    334:     next_cpu = cpumask_first(current_mask);  in move_to_next_cpu()
    336: if (next_cpu >= nr_cpu_ids) /* Shouldn't happen! */  in move_to_next_cpu()
    340: cpumask_set_cpu(next_cpu, current_mask);  in move_to_next_cpu()
    426: int next_cpu;  in start_single_kthread() (local)
    443: next_cpu = cpumask_first(current_mask);  in start_single_kthread()
    445: cpumask_set_cpu(next_cpu, current_mask);  in start_single_kthread()
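move_to_next_cpu() shows the common cpumask rotation idiom: take cpumask_next() after the current CPU and fall back to cpumask_first() when the result runs past nr_cpu_ids. The same shape reappears in tsc_sync.c and clocksource.c further down. A self-contained model, with the cpumask reduced to a plain bitmask and hand-written stand-ins for the two helpers:

    #include <stdio.h>

    #define NR_CPU_IDS 8

    /* Toy stand-ins for cpumask_next()/cpumask_first(); the mask is a plain
     * bitmask and NR_CPU_IDS plays the role of nr_cpu_ids. */
    static int mask_next(int cpu, unsigned int mask)
    {
        for (int i = cpu + 1; i < NR_CPU_IDS; i++)
            if (mask & (1u << i))
                return i;
        return NR_CPU_IDS; /* past the end, like cpumask_next() */
    }

    static int mask_first(unsigned int mask)
    {
        return mask_next(-1, mask);
    }

    /* The wraparound idiom from move_to_next_cpu(): take the CPU after the
     * current one; if that runs off the end of the mask, wrap to the first. */
    static int next_cpu_round_robin(int current_cpu, unsigned int mask)
    {
        int next_cpu = mask_next(current_cpu, mask);

        if (next_cpu >= NR_CPU_IDS)
            next_cpu = mask_first(mask);
        return next_cpu;
    }

    int main(void)
    {
        unsigned int online = 0x2d; /* CPUs 0, 2, 3, 5 */
        int cpu = 0;

        for (int i = 0; i < 6; i++) {
            cpu = next_cpu_round_robin(cpu, online);
            printf("%d ", cpu); /* 2 3 5 0 2 3 */
        }
        printf("\n");
        return 0;
    }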
/linux/kernel/trace/trace_entries.h
    117: __field( unsigned int, next_cpu ) \
    134: __entry->next_cpu)
    152: __entry->next_cpu)

/linux/kernel/trace/trace_sched_wakeup.c
    393: entry->next_cpu = task_cpu(next);  in tracing_sched_switch_trace()
    421: entry->next_cpu = task_cpu(wakee);  in tracing_sched_wakeup_trace()

/linux/kernel/trace/trace_output.c
    943:  field->next_cpu,  in trace_ctxwake_print()
    977:  field->next_cpu,  in trace_ctxwake_raw()
    1013: SEQ_PUT_HEX_FIELD(s, field->next_cpu);  in trace_ctxwake_hex()
    1044: SEQ_PUT_FIELD(s, field->next_cpu);  in trace_ctxwake_bin()
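Read together, the three trace files above cover the whole life of the next_cpu field in a context-switch/wakeup event: trace_entries.h declares it, trace_sched_wakeup.c fills it from task_cpu(), and trace_output.c prints it in the text, raw, hex and binary output paths. A toy model of that declare/fill/print split; the struct and function names here are invented, since the real field is generated by ftrace's __field()/FTRACE_ENTRY() macros:

    #include <stdio.h>

    /* Invented flat struct standing in for the macro-generated ctx-switch
     * entry; only the next_cpu field corresponds to the hits above. */
    struct ctx_switch_entry {
        unsigned int prev_pid;
        unsigned int next_pid;
        unsigned int next_cpu;
    };

    static void fill_entry(struct ctx_switch_entry *entry,
                           unsigned int prev_pid, unsigned int next_pid,
                           unsigned int next_task_cpu)
    {
        entry->prev_pid = prev_pid;
        entry->next_pid = next_pid;
        entry->next_cpu = next_task_cpu; /* task_cpu(next) in the kernel */
    }

    static void print_entry(const struct ctx_switch_entry *entry)
    {
        printf("%u ==> %u [%03u]\n",
               entry->prev_pid, entry->next_pid, entry->next_cpu);
    }

    int main(void)
    {
        struct ctx_switch_entry entry;

        fill_entry(&entry, 1234, 5678, 2);
        print_entry(&entry);
        return 0;
    }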
/linux/kernel/trace/trace.c
    3575: int next_cpu = -1;  in __find_next_entry() (local)
    3605: next_cpu = cpu;  in __find_next_entry()
    3615: *ent_cpu = next_cpu;  in __find_next_entry()
/linux/arch/x86/kernel/tsc_sync.c
    95:  int next_cpu;  in tsc_sync_check_timer_fn() (local)
    100: next_cpu = cpumask_next(raw_smp_processor_id(), cpu_online_mask);  in tsc_sync_check_timer_fn()
    101: if (next_cpu >= nr_cpu_ids)  in tsc_sync_check_timer_fn()
    102:     next_cpu = cpumask_first(cpu_online_mask);  in tsc_sync_check_timer_fn()
    105: add_timer_on(&tsc_sync_check_timer, next_cpu);  in tsc_sync_check_timer_fn()
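tsc_sync_check_timer_fn() applies the same cpumask wraparound as the trace_hwlat.c sketch above, but instead of migrating a kthread it re-arms its timer on the chosen CPU with add_timer_on(), so the periodic TSC check visits every online CPU in turn (clocksource.c's watchdog further down does the same). A rough simulation with the timer reduced to a loop and an assumed online mask:

    #include <stdio.h>

    #define NR_CPU_IDS 4

    static unsigned int cpu_online_mask = 0xd; /* assumed: CPUs 0, 2, 3 */

    /* Same wraparound selection as in the trace_hwlat.c sketch above. */
    static int next_online_cpu(int cpu)
    {
        for (int i = cpu + 1; i < NR_CPU_IDS; i++)
            if (cpu_online_mask & (1u << i))
                return i;
        for (int i = 0; i < NR_CPU_IDS; i++)
            if (cpu_online_mask & (1u << i))
                return i;
        return -1;
    }

    /* Each "firing" does its check on the CPU the timer was armed on, then
     * re-arms on the next online CPU; add_timer_on() is only imitated by
     * carrying the target CPU into the next loop iteration. */
    int main(void)
    {
        int cpu = 0;

        for (int firing = 0; firing < 6; firing++) {
            printf("firing %d: TSC check runs on cpu %d\n", firing, cpu);
            cpu = next_online_cpu(cpu);
        }
        return 0;
    }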
/linux/tools/testing/selftests/kvm/rseq_test.c
    62: static int next_cpu(int cpu)  in next_cpu() (function)
    92: for (i = 0, cpu = min_cpu; i < NR_TASK_MIGRATIONS; i++, cpu = next_cpu(cpu)) {  in migration_worker()
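In rseq_test.c, next_cpu() feeds migration_worker(), which repeatedly re-pins a task to force migrations. The sketch below re-creates only that idea in plain userspace code with the real sched_getaffinity()/sched_setaffinity() API; the next_cpu() helper and the loop count are mine, and none of the test's rseq checking is reproduced:

    #define _GNU_SOURCE
    #include <sched.h>
    #include <stdio.h>

    /* Return the next CPU after 'cpu' that this process may run on,
     * wrapping around the affinity mask. */
    static int next_cpu(int cpu, cpu_set_t *allowed)
    {
        do {
            cpu = (cpu + 1) % CPU_SETSIZE;
        } while (!CPU_ISSET(cpu, allowed));
        return cpu;
    }

    int main(void)
    {
        cpu_set_t allowed, pin;
        int cpu;

        if (sched_getaffinity(0, sizeof(allowed), &allowed))
            return 1;
        cpu = next_cpu(-1, &allowed); /* first allowed CPU */

        /* Pin ourselves to each allowed CPU in turn, forcing migrations. */
        for (int i = 0; i < 8; i++) {
            CPU_ZERO(&pin);
            CPU_SET(cpu, &pin);
            if (sched_setaffinity(0, sizeof(pin), &pin))
                return 1;
            printf("now pinned to cpu %d (running on %d)\n",
                   cpu, sched_getcpu());
            cpu = next_cpu(cpu, &allowed);
        }
        return 0;
    }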
/linux/drivers/net/wireguard/queueing.h
    164: struct sk_buff *skb, struct workqueue_struct *wq, int *next_cpu)  in wg_queue_enqueue_per_device_and_peer() (argument)
    178: cpu = wg_cpumask_next_online(next_cpu);  in wg_queue_enqueue_per_device_and_peer()
/linux/kernel/time/clocksource.c
    356: int next_cpu, reset_pending;  in clocksource_watchdog() (local)
    468: next_cpu = cpumask_next(raw_smp_processor_id(), cpu_online_mask);  in clocksource_watchdog()
    469: if (next_cpu >= nr_cpu_ids)  in clocksource_watchdog()
    470:     next_cpu = cpumask_first(cpu_online_mask);  in clocksource_watchdog()
    478: add_timer_on(&watchdog_timer, next_cpu);  in clocksource_watchdog()

/linux/kernel/time/tick-broadcast.c
    689: int cpu, next_cpu = 0;  in tick_handle_oneshot_broadcast() (local)
    718: next_cpu = cpu;  in tick_handle_oneshot_broadcast()
    755: tick_broadcast_set_event(dev, next_cpu, next_event);  in tick_handle_oneshot_broadcast()
/linux/block/blk-mq.c
    1807: int next_cpu = hctx->next_cpu;  in blk_mq_hctx_next_cpu() (local)
    1814: next_cpu = cpumask_next_and(next_cpu, hctx->cpumask,  in blk_mq_hctx_next_cpu()
    1816: if (next_cpu >= nr_cpu_ids)  in blk_mq_hctx_next_cpu()
    1817:     next_cpu = blk_mq_first_mapped_cpu(hctx);  in blk_mq_hctx_next_cpu()
    1825: if (!cpu_online(next_cpu)) {  in blk_mq_hctx_next_cpu()
    1835: hctx->next_cpu = next_cpu;  in blk_mq_hctx_next_cpu()
    1840: hctx->next_cpu = next_cpu;  in blk_mq_hctx_next_cpu()
    1841: return next_cpu;  in blk_mq_hctx_next_cpu()
    3426: hctx->next_cpu = blk_mq_first_mapped_cpu(hctx);  in blk_mq_map_swqueue()
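blk_mq_hctx_next_cpu() keeps a persistent cursor (hctx->next_cpu, declared in include/linux/blk-mq.h below) and advances it through the CPUs mapped to the hardware context intersected with the online mask, wrapping back to the first mapped CPU. A toy model of that per-object cursor; masks are plain bitmaps, cpumask_next_and() is imitated by hand, and the offline-fallback path at line 1825 (and the real code's work batching) is not modeled:

    #include <stdio.h>

    #define NR_CPU_IDS 8

    /* Toy blk-mq hardware context: the set of CPUs mapped to it plus a
     * cursor so successive queue runs rotate among them. */
    struct hctx {
        unsigned int cpumask; /* CPUs mapped to this hw queue */
        int next_cpu;
    };

    static unsigned int cpu_online_mask = 0xfb; /* assumed: CPU 2 offline */

    static int first_mapped_cpu(const struct hctx *hctx)
    {
        for (int i = 0; i < NR_CPU_IDS; i++)
            if (hctx->cpumask & (1u << i))
                return i;
        return NR_CPU_IDS;
    }

    /* Pick the CPU to run the queue on next: advance within mapped AND
     * online CPUs, wrap to the first mapped CPU, remember the choice. */
    static int hctx_next_cpu(struct hctx *hctx)
    {
        int next_cpu = hctx->next_cpu;

        for (next_cpu++; next_cpu < NR_CPU_IDS; next_cpu++)
            if ((hctx->cpumask & cpu_online_mask) & (1u << next_cpu))
                break;
        if (next_cpu >= NR_CPU_IDS)
            next_cpu = first_mapped_cpu(hctx);

        hctx->next_cpu = next_cpu;
        return next_cpu;
    }

    int main(void)
    {
        struct hctx hctx = { .cpumask = 0x0e }; /* CPUs 1-3 mapped */

        /* Initial cursor, as blk_mq_map_swqueue() does at line 3426. */
        hctx.next_cpu = first_mapped_cpu(&hctx);
        for (int i = 0; i < 5; i++)
            printf("%d ", hctx_next_cpu(&hctx)); /* 3 1 3 1 3 */
        printf("\n");
        return 0;
    }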
/linux/drivers/irqchip/irq-gic-v3.c
    1144: int next_cpu, cpu = *base_cpu;  in gic_compute_target_list() (local)
    1151: next_cpu = cpumask_next(cpu, mask);  in gic_compute_target_list()
    1152: if (next_cpu >= nr_cpu_ids)  in gic_compute_target_list()
    1154: cpu = next_cpu;  in gic_compute_target_list()
/linux/include/linux/blk-mq.h
    262: int next_cpu;  (member)
/linux/net/core/dev.c
    4375: struct rps_dev_flow *rflow, u16 next_cpu)  in set_rps_cpu() (argument)
    4377: if (next_cpu < nr_cpu_ids) {  in set_rps_cpu()
    4390: rxq_index = cpu_rmap_lookup_index(dev->rx_cpu_rmap, next_cpu);  in set_rps_cpu()
    4411: per_cpu(softnet_data, next_cpu).input_queue_head;  in set_rps_cpu()
    4414: rflow->cpu = next_cpu;  in set_rps_cpu()
    4462: u32 next_cpu;  in get_rps_cpu() (local)
    4470: next_cpu = ident & rps_cpu_mask;  in get_rps_cpu()
    4489: if (unlikely(tcpu != next_cpu) &&  in get_rps_cpu()
    4493: tcpu = next_cpu;  in get_rps_cpu()
    4494: rflow = set_rps_cpu(dev, skb, rflow, next_cpu);  in get_rps_cpu()
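In the RPS/RFS path, get_rps_cpu() derives the desired CPU from the low bits of a sock-flow-table entry (next_cpu = ident & rps_cpu_mask) and, when it differs from where the flow is currently steered, calls set_rps_cpu() to move it. The sketch below keeps only that masking-and-resteer decision; the table lookup, hashing, queue-head accounting and aRFS hardware filter update are all omitted, and RPS_CPU_MASK is an assumed constant:

    #include <stdint.h>
    #include <stdio.h>

    #define RPS_CPU_MASK 0x3f /* assumed: enough low bits for 64 CPUs */

    struct rps_dev_flow {
        uint32_t cpu; /* CPU this flow is currently steered to */
    };

    /* 'ident' carries the CPU the consuming application last ran on in its
     * low bits; recover it and re-steer the flow if it changed. */
    static uint32_t get_rps_cpu(uint32_t ident, struct rps_dev_flow *rflow)
    {
        uint32_t next_cpu = ident & RPS_CPU_MASK;

        if (rflow->cpu != next_cpu) {
            /* set_rps_cpu() in the kernel also updates aRFS hardware
             * filters; here we only record the new steering target. */
            rflow->cpu = next_cpu;
        }
        return rflow->cpu;
    }

    int main(void)
    {
        struct rps_dev_flow flow = { .cpu = 1 };

        /* Application for this flow moved to CPU 5: upper ident bits would
         * hold the flow hash, low bits the desired CPU. */
        uint32_t ident = (0xabcd << 6) | 5;

        printf("steer packets for this flow to cpu %u\n",
               get_rps_cpu(ident, &flow));
        return 0;
    }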
/linux/drivers/net/ethernet/mediatek/mtk_eth_soc.c
    1406: u32 next_cpu = desc->txd2;  in mtk_poll_tx_qdma() (local)
    1431: cpu = next_cpu;  in mtk_poll_tx_qdma()
/linux/kernel/sched/fair.c
    11670: goto next_cpu;  in sched_group_set_idle()
    11698: next_cpu:  in sched_group_set_idle()