/linux/tools/lib/ |
rbtree.c
    242  if (rb_is_red(sibling)) {  in ____rb_erase_color()
    252  tmp1 = sibling->rb_left;  in ____rb_erase_color()
    259  sibling = tmp1;  in ____rb_erase_color()
    261  tmp1 = sibling->rb_right;  in ____rb_erase_color()
    327  tmp1 = sibling;  in ____rb_erase_color()
    328  sibling = tmp2;  in ____rb_erase_color()
    342  tmp2 = sibling->rb_left;  in ____rb_erase_color()
    363  sibling = tmp1;  in ____rb_erase_color()
    365  tmp1 = sibling->rb_left;  in ____rb_erase_color()
    391  tmp1 = sibling;  in ____rb_erase_color()
    [all …]
|
/linux/lib/ |
rbtree.c
    242  if (rb_is_red(sibling)) {  in ____rb_erase_color()
    252  tmp1 = sibling->rb_left;  in ____rb_erase_color()
    259  sibling = tmp1;  in ____rb_erase_color()
    261  tmp1 = sibling->rb_right;  in ____rb_erase_color()
    327  tmp1 = sibling;  in ____rb_erase_color()
    328  sibling = tmp2;  in ____rb_erase_color()
    342  tmp2 = sibling->rb_left;  in ____rb_erase_color()
    363  sibling = tmp1;  in ____rb_erase_color()
    365  tmp1 = sibling->rb_left;  in ____rb_erase_color()
    391  tmp1 = sibling;  in ____rb_erase_color()
    [all …]
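The sibling juggling above is internal to ____rb_erase_color()'s rebalancing; library users never touch it and only call the insert/erase API. As a rough, hedged illustration of how that API is normally consumed (the container type `mynode` and its `key` field are made up for this sketch):

    #include <linux/rbtree.h>

    struct mynode {                         /* hypothetical container type */
    	struct rb_node node;
    	int key;
    };

    static void mynode_insert(struct rb_root *root, struct mynode *new)
    {
    	struct rb_node **link = &root->rb_node, *parent = NULL;

    	/* Walk down to find the insertion point, remembering the parent. */
    	while (*link) {
    		struct mynode *cur = rb_entry(*link, struct mynode, node);

    		parent = *link;
    		link = new->key < cur->key ? &(*link)->rb_left
    					   : &(*link)->rb_right;
    	}
    	rb_link_node(&new->node, parent, link);
    	rb_insert_color(&new->node, root);  /* recolor/rotate; sibling fixups happen inside */
    }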
|
/linux/kernel/ |
resource.c
    73  return p->sibling;  in next_resource()
    80  return p->sibling;  in next_resource_skip_children()
    217  p = &tmp->sibling;  in __request_resource()
    243  chd->sibling = tmp->sibling;  in __release_resource()
    248  p = &tmp->sibling;  in __release_resource()
    262  p = p->sibling;  in __release_child_resources()
    823  new->sibling = next->sibling;  in __insert_resource()
    959  if (res->sibling && (res->sibling->start <= end))  in __adjust_resource()
    1416  new_res->sibling = res->sibling;  in release_mem_region_adjustable()
    1477  res->sibling = cur->sibling;  in merge_system_ram_resource()
    [all …]
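next_resource() above advances a walk over the resource tree, which is linked purely by parent/sibling/child pointers. A minimal sketch of that preorder step, assuming only the pointer layout of struct resource from <linux/ioport.h> (locking via resource_lock is deliberately omitted):

    #include <linux/ioport.h>

    /* Simplified sketch: return the next node of a preorder walk over a
     * tree linked by child/sibling/parent pointers, NULL when done. */
    static struct resource *next_node(struct resource *p)
    {
    	if (p->child)
    		return p->child;            /* descend first */
    	while (!p->sibling && p->parent)
    		p = p->parent;              /* climb until a sibling exists */
    	return p->sibling;
    }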
|
/linux/arch/s390/kernel/ |
guarded_storage.c
    96  struct task_struct *sibling;  in gs_broadcast() local
    99  for_each_thread(current, sibling) {  in gs_broadcast()
    100  if (!sibling->thread.gs_bc_cb)  in gs_broadcast()
    102  if (test_and_set_tsk_thread_flag(sibling, TIF_GUARDED_STORAGE))  in gs_broadcast()
    103  kick_process(sibling);  in gs_broadcast()
|
Makefile
    32  CFLAGS_stacktrace.o += -fno-optimize-sibling-calls
    33  CFLAGS_dumpstack.o += -fno-optimize-sibling-calls
    34  CFLAGS_unwind_bc.o += -fno-optimize-sibling-calls
|
/linux/arch/sparc/kernel/ |
pci_psycho.c
    189  if (pbm->sibling)  in psycho_ue_intr()
    190  psycho_check_iommu_error(pbm->sibling, afsr, afar, UE_ERR);  in psycho_ue_intr()
    526  pbm->sibling = psycho_find_sibling(upa_portid);  in psycho_probe()
    527  if (pbm->sibling) {  in psycho_probe()
    528  iommu = pbm->sibling->iommu;  in psycho_probe()
    563  if (!pbm->sibling) {  in psycho_probe()
    579  if (pbm->sibling)  in psycho_probe()
    580  pbm->sibling->sibling = pbm;  in psycho_probe()
    587  if (!pbm->sibling)  in psycho_probe()
|
/linux/tools/testing/selftests/seccomp/ |
seccomp_bpf.c
    2496  memset(&self->sibling, 0, sizeof(self->sibling));  in FIXTURE_SETUP()
    2511  self->sibling[0].tid = 0;  in FIXTURE_SETUP()
    2515  self->sibling[0].diverge = 0;  in FIXTURE_SETUP()
    2516  self->sibling[0].num_waits = 1;  in FIXTURE_SETUP()
    2519  self->sibling[1].tid = 0;  in FIXTURE_SETUP()
    2523  self->sibling[1].diverge = 0;  in FIXTURE_SETUP()
    2588  pthread_create(&sibling->tid, NULL, tsync_sibling, (void *)sibling);  in tsync_start_sibling()
    2620  self->sibling[0].diverge = 1;  in TEST_F()
    2767  self->sibling[0].diverge = 1;  in TEST_F()
    2812  self->sibling[0].diverge = 1;  in TEST_F()
    [all …]
|
/linux/drivers/base/ |
arch_topology.c
    674  int sibling;  in remove_cpu_topology() local
    676  for_each_cpu(sibling, topology_core_cpumask(cpu))  in remove_cpu_topology()
    677  cpumask_clear_cpu(cpu, topology_core_cpumask(sibling));  in remove_cpu_topology()
    678  for_each_cpu(sibling, topology_sibling_cpumask(cpu))  in remove_cpu_topology()
    679  cpumask_clear_cpu(cpu, topology_sibling_cpumask(sibling));  in remove_cpu_topology()
    680  for_each_cpu(sibling, topology_cluster_cpumask(cpu))  in remove_cpu_topology()
    681  cpumask_clear_cpu(cpu, topology_cluster_cpumask(sibling));  in remove_cpu_topology()
    682  for_each_cpu(sibling, topology_llc_cpumask(cpu))  in remove_cpu_topology()
    683  cpumask_clear_cpu(cpu, topology_llc_cpumask(sibling));  in remove_cpu_topology()
|
/linux/drivers/gpu/drm/i915/gt/ |
intel_execlists_submission.c
    3615  if (sibling >= ve->num_siblings)  in virtual_get_sibling()
    3618  return ve->siblings[sibling];  in virtual_get_sibling()
    3695  &sibling->execlists.virtual);  in virtual_submission_tasklet()
    3733  &sibling->execlists.virtual,  in virtual_submission_tasklet()
    3857  if (sibling->mask & ve->base.mask) {  in execlists_create_virtual()
    3859  sibling->name);  in execlists_create_virtual()
    3881  ve->base.mask |= sibling->mask;  in execlists_create_virtual()
    3894  sibling->class, ve->base.class);  in execlists_create_virtual()
    3901  ve->base.class = sibling->class;  in execlists_create_virtual()
    3914  sibling->emit_fini_breadcrumb_dw;  in execlists_create_virtual()
    [all …]
|
/linux/Documentation/devicetree/ |
of_unittest.rst
    72  struct device_node *sibling;
    77  considering only child and sibling pointers. There exists another pointer,
    79  a particular level the child node and all the sibling nodes will have a parent
    142  replaces the current child and turns it into its sibling. So, when the testcase
    183  sibling compared to the earlier structure (Figure 2). After attaching first
    185  (i.e. test-child0) to become a sibling and makes itself a child node,
    204  node's parent to its sibling or attaches the previous sibling to the given
    205  node's sibling, as appropriate. That is it :)
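The excerpt describes the flattened device-tree layout in which every node carries parent, child and sibling pointers, and all nodes on one level share the same parent. A minimal sketch of walking one level of that structure, assuming only those three pointers from struct device_node in <linux/of.h> (real code should prefer for_each_child_of_node(), which also handles reference counting):

    #include <linux/of.h>

    /* Sketch only: visit every node at one level by following the sibling
     * chain from the parent's first child. */
    static void print_children(struct device_node *parent)
    {
    	struct device_node *np;

    	for (np = parent->child; np; np = np->sibling)
    		pr_info("child: %pOFn\n", np);
    }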
|
/linux/drivers/powercap/ |
dtpm.c
    71  list_for_each_entry(child, &dtpm->children, sibling) {  in __get_power_uw()
    97  list_for_each_entry(child, &dtpm->children, sibling) {  in __dtpm_rebalance_weight()
    199  list_del(&dtpm->sibling);  in dtpm_release_zone()
    268  list_for_each_entry(child, &dtpm->children, sibling) {  in __set_power_limit_uw()
    368  INIT_LIST_HEAD(&dtpm->sibling);  in dtpm_init()
    445  list_add_tail(&dtpm->sibling, &parent->children);  in dtpm_register()
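dtpm (like several other hits in this listing) uses the common kernel idiom of a parent that owns a `children` list and children that hook into it through a list_head named `sibling`. A self-contained sketch of that idiom, with `struct node` standing in for the real dtpm type:

    #include <linux/list.h>

    struct node {                           /* illustrative type, not from dtpm.c */
    	struct list_head sibling;       /* our entry in the parent's children list */
    	struct list_head children;      /* head of our own children list */
    };

    static void node_init(struct node *n)
    {
    	INIT_LIST_HEAD(&n->sibling);
    	INIT_LIST_HEAD(&n->children);
    }

    static void node_add_child(struct node *parent, struct node *child)
    {
    	list_add_tail(&child->sibling, &parent->children);
    }

    static int node_count_children(struct node *parent)
    {
    	struct node *child;
    	int n = 0;

    	/* third argument names the list_head member inside the entry */
    	list_for_each_entry(child, &parent->children, sibling)
    		n++;
    	return n;
    }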
|
/linux/kernel/sched/ |
topology.c
    882  struct sched_domain *sibling;  in build_balance_mask() local
    895  if (!sibling->child)  in build_balance_mask()
    973  while (sibling->child &&  in find_descended_sibling()
    976  sibling = sibling->child;  in find_descended_sibling()
    983  while (sibling->child &&  in find_descended_sibling()
    985  sched_domain_span(sibling)))  in find_descended_sibling()
    986  sibling = sibling->child;  in find_descended_sibling()
    988  return sibling;  in find_descended_sibling()
    998  struct sched_domain *sibling;  in build_overlap_sched_groups() local
    1055  if (sibling->child &&  in build_overlap_sched_groups()
    [all …]
|
/linux/drivers/clk/renesas/ |
rzg2l-cpg.c
    464  struct mstp_clock *sibling;  member
    519  if (clock->sibling) {  in rzg2l_mod_clock_enable()
    525  enabled = clock->sibling->enabled;  in rzg2l_mod_clock_enable()
    539  if (clock->sibling) {  in rzg2l_mod_clock_disable()
    545  enabled = clock->sibling->enabled;  in rzg2l_mod_clock_disable()
    567  if (clock->sibling)  in rzg2l_mod_clock_is_enabled()
    666  struct mstp_clock *sibling;  in rzg2l_cpg_register_mod_clk() local
    669  sibling = rzg2l_mod_clock__get_sibling(clock, priv);  in rzg2l_cpg_register_mod_clk()
    670  if (sibling) {  in rzg2l_cpg_register_mod_clk()
    671  clock->sibling = sibling;  in rzg2l_cpg_register_mod_clk()
    [all …]
|
/linux/drivers/pci/hotplug/ |
acpiphp_glue.c
    161  list_for_each_entry_safe(func, tmp, &slot->funcs, sibling)  in free_bridge()
    329  list_add_tail(&newfunc->sibling, &slot->funcs);  in acpiphp_add_context()
    344  list_for_each_entry(func, &slot->funcs, sibling) {  in cleanup_bridge()
    397  list_for_each_entry(func, &slot->funcs, sibling) {  in acpiphp_set_acpi_region()
    413  list_for_each_entry(func, &slot->funcs, sibling) {  in check_hotplug_bridge()
    425  list_for_each_entry(func, &slot->funcs, sibling) {  in acpiphp_rescan_slot()
    527  list_for_each_entry(func, &slot->funcs, sibling) {  in enable_slot()
    560  list_for_each_entry(func, &slot->funcs, sibling)  in disable_slot()
    596  list_for_each_entry(func, &slot->funcs, sibling) {  in get_slot_status()
    1005  list_for_each_entry(func, &slot->funcs, sibling)  in acpiphp_disable_and_eject_slot()
|
/linux/drivers/sh/clk/ |
core.c
    217  list_del_init(&child->sibling);  in clk_reparent()
    219  list_add(&child->sibling, &parent->children);  in clk_reparent()
    230  list_for_each_entry(clkp, &tclk->children, sibling) {  in propagate_rate()
    321  list_for_each_entry(clkp, &root_clks, sibling) {  in recalculate_root_clocks()
    434  list_add(&clk->sibling, &clk->parent->children);  in clk_register()
    436  list_add(&clk->sibling, &root_clks);  in clk_register()
    455  list_del(&clk->sibling);  in clk_unregister()
|
/linux/arch/x86/kernel/ |
smpboot.c
    1603  int sibling;  in remove_siblinginfo() local
    1606  for_each_cpu(sibling, topology_core_cpumask(cpu)) {  in remove_siblinginfo()
    1612  cpu_data(sibling).booted_cores--;  in remove_siblinginfo()
    1615  for_each_cpu(sibling, topology_die_cpumask(cpu))  in remove_siblinginfo()
    1616  cpumask_clear_cpu(cpu, topology_die_cpumask(sibling));  in remove_siblinginfo()
    1618  for_each_cpu(sibling, topology_sibling_cpumask(cpu)) {  in remove_siblinginfo()
    1621  cpu_data(sibling).smt_active = false;  in remove_siblinginfo()
    1624  for_each_cpu(sibling, cpu_llc_shared_mask(cpu))  in remove_siblinginfo()
    1625  cpumask_clear_cpu(cpu, cpu_llc_shared_mask(sibling));  in remove_siblinginfo()
    1626  for_each_cpu(sibling, cpu_l2c_shared_mask(cpu))  in remove_siblinginfo()
    [all …]
|
/linux/drivers/perf/ |
qcom_l2_pmu.c
    442  struct perf_event *sibling;  in l2_cache_event_init() local
    479  for_each_sibling_event(sibling, event->group_leader) {  in l2_cache_event_init()
    480  if (sibling->pmu != event->pmu &&  in l2_cache_event_init()
    481  !is_software_event(sibling)) {  in l2_cache_event_init()
    516  for_each_sibling_event(sibling, event->group_leader) {  in l2_cache_event_init()
    517  if ((sibling != event) &&  in l2_cache_event_init()
    518  !is_software_event(sibling) &&  in l2_cache_event_init()
    519  (L2_EVT_GROUP(sibling->attr.config) ==  in l2_cache_event_init()
    523  sibling->attr.config,  in l2_cache_event_init()
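l2_cache_event_init() above validates an event group by walking the leader's siblings. A condensed sketch of that validation pattern, assuming only for_each_sibling_event() and is_software_event() from <linux/perf_event.h>; the helper name group_is_compatible is made up:

    #include <linux/perf_event.h>

    /* Sketch: every hardware member of the group must belong to the same
     * PMU as the event being initialised; software events are allowed. */
    static bool group_is_compatible(struct perf_event *event)
    {
    	struct perf_event *leader = event->group_leader;
    	struct perf_event *sibling;

    	if (leader->pmu != event->pmu && !is_software_event(leader))
    		return false;

    	for_each_sibling_event(sibling, leader) {
    		if (sibling->pmu != event->pmu && !is_software_event(sibling))
    			return false;
    	}
    	return true;
    }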
|
/linux/Documentation/admin-guide/hw-vuln/ |
core-scheduling.rst
    105  During a schedule() event on any sibling of a core, the highest priority task on
    106  the sibling's core is picked and assigned to the sibling calling schedule(), if
    107  the sibling has the task enqueued. For rest of the siblings in the core,
    114  switch to the new task immediately. If an idle task is selected for a sibling,
    115  then the sibling is considered to be in a `forced idle` state. I.e., it may
    127  task. If a sibling does not have a trusted task to run, it will be forced idle
    131  the sibling to force it into idle. This results in 4 cases which need to be
    189  sibling. Such attacks are possible for any combination of sibling CPU modes
    212  sibling hyperthreads from one another. Prototypes of mitigations have been posted
|
/linux/net/netfilter/ |
nf_conntrack_pptp.c
    155  struct nf_conn *sibling;  in destroy_sibling_or_exp() local
    163  sibling = nf_ct_tuplehash_to_ctrack(h);  in destroy_sibling_or_exp()
    164  pr_debug("setting timeout of conntrack %p to 0\n", sibling);  in destroy_sibling_or_exp()
    165  sibling->proto.gre.timeout = 0;  in destroy_sibling_or_exp()
    166  sibling->proto.gre.stream_timeout = 0;  in destroy_sibling_or_exp()
    167  nf_ct_kill(sibling);  in destroy_sibling_or_exp()
    168  nf_ct_put(sibling);  in destroy_sibling_or_exp()
|
/linux/arch/powerpc/platforms/85xx/ |
smp.c
    390  int sibling = cpu_last_thread_sibling(cpu);  in mpc85xx_smp_kexec_cpu_down() local
    409  } else if (sibling != crashing_cpu &&  in mpc85xx_smp_kexec_cpu_down()
    411  cpu_thread_in_core(sibling) != 0) {  in mpc85xx_smp_kexec_cpu_down()
    413  disable_cpu = sibling;  in mpc85xx_smp_kexec_cpu_down()
|
/linux/drivers/acpi/ |
dock.c
    36  struct list_head sibling;  member
    134  list_for_each_entry(ds, &dock_stations, sibling)  in find_dock_station()
    192  list_for_each_entry(dock_station, &dock_stations, sibling)  in is_dock_device()
    612  INIT_LIST_HEAD(&dock_station->sibling);  in acpi_dock_add()
    635  list_add(&dock_station->sibling, &dock_stations);  in acpi_dock_add()
|
/linux/drivers/gpu/drm/i915/gt/uc/ |
intel_guc_submission.c
    1175  if (num_siblings++ == sibling)  in guc_virtual_get_sibling()
    3126  struct intel_engine_cs *sibling;  in guc_irq_enable_breadcrumbs() local
    3139  struct intel_engine_cs *sibling;  in guc_irq_disable_breadcrumbs() local
    3159  struct intel_engine_cs *sibling =  in guc_init_breadcrumbs() local
    3162  if (sibling) {  in guc_init_breadcrumbs()
    4269  sibling->name);  in guc_create_virtual()
    4274  ve->base.mask |= sibling->mask;  in guc_create_virtual()
    4283  ve->base.class = sibling->class;  in guc_create_virtual()
    4290  sibling->add_active_request;  in guc_create_virtual()
    4296  sibling->emit_init_breadcrumb;  in guc_create_virtual()
    [all …]
|
/linux/arch/arm/mach-omap1/ |
clock.c
    739  list_del_init(&child->sibling);  in clk_reparent()
    741  list_add(&child->sibling, &parent->children);  in clk_reparent()
    753  list_for_each_entry(clkp, &tclk->children, sibling) {  in propagate_rate()
    773  list_for_each_entry(clkp, &root_clks, sibling) {  in recalculate_root_clocks()
    805  list_add(&clk->sibling, &clk->parent->children);  in clk_register()
    807  list_add(&clk->sibling, &root_clks);  in clk_register()
    824  list_del(&clk->sibling);  in clk_unregister()
|
/linux/drivers/of/ |
dynamic.c
    226  np->sibling = np->parent->child;  in __of_attach_node()
    268  parent->child = np->sibling;  in __of_detach_node()
    272  prevsib->sibling != np;  in __of_detach_node()
    273  prevsib = prevsib->sibling)  in __of_detach_node()
    275  prevsib->sibling = np->sibling;  in __of_detach_node()
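__of_detach_node() above unlinks a node from its parent's singly linked child list, walking the sibling chain to find the predecessor when the node is not the first child. A simplified sketch of just that unlink step (lock handling, flags and reference counting from the real function are omitted):

    /* Sketch: remove np from its parent's child/sibling chain. */
    static void unlink_from_parent(struct device_node *np)
    {
    	struct device_node *parent = np->parent;
    	struct device_node *prev;

    	if (parent->child == np) {
    		parent->child = np->sibling;            /* first child: repoint head */
    	} else {
    		for (prev = parent->child; prev->sibling != np; prev = prev->sibling)
    			;                               /* find the predecessor */
    		prev->sibling = np->sibling;
    	}
    	np->sibling = NULL;
    }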
|
/linux/net/sched/ |
sch_cbq.c
    985  clp = &this->sibling;  in cbq_unlink_class()
    989  *clp = cl->sibling;  in cbq_unlink_class()
    992  clp = &cl->sibling;  in cbq_unlink_class()
    993  } while ((cl = *clp) != this->sibling);  in cbq_unlink_class()
    996  this->tparent->children = this->sibling;  in cbq_unlink_class()
    997  if (this->sibling == this)  in cbq_unlink_class()
    1001  WARN_ON(this->sibling != this);  in cbq_unlink_class()
    1010  this->sibling = this;  in cbq_link_class()
    1019  this->sibling = parent->children->sibling;  in cbq_link_class()
    1020  parent->children->sibling = this;  in cbq_link_class()
    [all …]
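cbq_link_class() above keeps the children of a class on a circular singly linked ring through `sibling`, where a lone child points at itself. A standalone sketch of the link step, with `struct cls` standing in for the real cbq class type:

    struct cls {                            /* stand-in, not the real cbq_class */
    	struct cls *sibling;            /* circular ring of siblings */
    	struct cls *children;           /* any one child, or NULL */
    };

    static void ring_link(struct cls *parent, struct cls *this)
    {
    	this->sibling = this;                           /* ring of one by default */
    	if (parent->children) {
    		this->sibling = parent->children->sibling; /* splice in after the head */
    		parent->children->sibling = this;
    	} else {
    		parent->children = this;
    	}
    }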
|