/linux/arch/riscv/kernel/
  fpu.S
      71: fld f0, TASK_THREAD_F0_F0(a0)
      72: fld f1, TASK_THREAD_F1_F0(a0)
      73: fld f2, TASK_THREAD_F2_F0(a0)
      74: fld f3, TASK_THREAD_F3_F0(a0)
      75: fld f4, TASK_THREAD_F4_F0(a0)
      76: fld f5, TASK_THREAD_F5_F0(a0)
      77: fld f6, TASK_THREAD_F6_F0(a0)
      78: fld f7, TASK_THREAD_F7_F0(a0)
      79: fld f8, TASK_THREAD_F8_F0(a0)
      80: fld f9, TASK_THREAD_F9_F0(a0)
      [all …]
/linux/include/linux/mlx5/
  device.h
      50: #define __mlx5_bit_sz(typ, fld) sizeof(__mlx5_nullp(typ)->fld)   [argument]
      52: #define __mlx5_16_off(typ, fld) (__mlx5_bit_off(typ, fld) / 16)   [argument]
      53: #define __mlx5_dw_off(typ, fld) (__mlx5_bit_off(typ, fld) / 32)   [argument]
      54: #define __mlx5_64_off(typ, fld) (__mlx5_bit_off(typ, fld) / 64)   [argument]
      55: #define __mlx5_16_bit_off(typ, fld) (16 - __mlx5_bit_sz(typ, fld) - (__mlx5_bit_off(typ, fld) & 0xf…   [argument]
      56: #define __mlx5_dw_bit_off(typ, fld) (32 - __mlx5_bit_sz(typ, fld) - (__mlx5_bit_off(typ, fld) & 0x1…   [argument]
      58: #define __mlx5_dw_mask(typ, fld) (__mlx5_mask(typ, fld) << __mlx5_dw_bit_off(typ, fld))   [argument]
      60: #define __mlx5_16_mask(typ, fld) (__mlx5_mask16(typ, fld) << __mlx5_16_bit_off(typ, fld))   [argument]
      69: #define MLX5_BYTE_OFF(typ, fld) (__mlx5_bit_off(typ, fld) / 8)   [argument]
      96: __mlx5_dw_off(typ, fld))) >> __mlx5_dw_bit_off(typ, fld)) & \
      [all …]
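The device.h hits are mlx5's bit-level accessors: a field is named by a (typ, fld) pair, and the macros derive its 32-bit word index, its shift from the word's most significant end, and its mask. A minimal standalone sketch of that arithmetic, assuming a field described only by a bit offset and a bit width (the FLD_* names are invented for illustration, not part of the mlx5 headers):

    /*
     * Hypothetical FLD_* helpers mirroring the arithmetic of
     * __mlx5_dw_off(), __mlx5_dw_bit_off() and __mlx5_dw_mask().
     */
    #include <stdint.h>
    #include <stdio.h>

    #define FLD_DW_OFF(bit_off)           ((bit_off) / 32)
    #define FLD_DW_BIT_OFF(bit_off, bits) (32 - (bits) - ((bit_off) & 0x1f))
    #define FLD_DW_MASK(bit_off, bits) \
            ((((uint32_t)1 << (bits)) - 1) << FLD_DW_BIT_OFF(bit_off, bits))

    int main(void)
    {
            /* example field: 12 bits wide, starting at bit offset 68 */
            unsigned int bit_off = 68, bits = 12;

            printf("dword %u, shift %u, mask 0x%08x\n",
                   FLD_DW_OFF(bit_off),
                   FLD_DW_BIT_OFF(bit_off, bits),
                   FLD_DW_MASK(bit_off, bits));
            return 0;   /* prints: dword 2, shift 16, mask 0x0fff0000 */
    }

The subtraction from 32 reflects that these layouts count bit offsets from the most significant bit of each big-endian dword, which is why the shift is measured from the top of the word rather than from bit 0.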
/linux/drivers/net/ethernet/mellanox/mlx5/core/
  en_stats.h
      45: #define MLX5E_DECLARE_STAT(type, fld) #fld, offsetof(type, fld)   [argument]
      46: #define MLX5E_DECLARE_RX_STAT(type, fld) "rx%d_"#fld, offsetof(type, fld)   [argument]
      47: #define MLX5E_DECLARE_TX_STAT(type, fld) "tx%d_"#fld, offsetof(type, fld)   [argument]
      48: #define MLX5E_DECLARE_XDPSQ_STAT(type, fld) "tx%d_xdp_"#fld, offsetof(type, fld)   [argument]
      50: #define MLX5E_DECLARE_XSKRQ_STAT(type, fld) "rx%d_xsk_"#fld, offsetof(type, fld)   [argument]
      51: #define MLX5E_DECLARE_XSKSQ_STAT(type, fld) "tx%d_xsk_"#fld, offsetof(type, fld)   [argument]
      52: #define MLX5E_DECLARE_CH_STAT(type, fld) "ch%d_"#fld, offsetof(type, fld)   [argument]
      54: #define MLX5E_DECLARE_PTP_TX_STAT(type, fld) "ptp_tx%d_"#fld, offsetof(type, fld)   [argument]
      55: #define MLX5E_DECLARE_PTP_CH_STAT(type, fld) "ptp_ch_"#fld, offsetof(type, fld)   [argument]
      56: #define MLX5E_DECLARE_PTP_CQ_STAT(type, fld) "ptp_cq%d_"#fld, offsetof(type, fld)   [argument]
      [all …]
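Each MLX5E_DECLARE_*_STAT() macro expands to a pair of initializers: the stringified field name (optionally prefixed with a queue-number format) and the field's offsetof(), so generic ethtool code can walk a descriptor table and fetch any counter by offset. A small self-contained sketch of the same idiom, with made-up demo_stats/stat_desc/DECLARE_STAT names rather than the mlx5 definitions:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    struct demo_stats {
            uint64_t packets;
            uint64_t bytes;
            uint64_t drops;
    };

    struct stat_desc {
            const char *name;   /* printable counter name */
            size_t offset;      /* byte offset inside struct demo_stats */
    };

    /* same shape as MLX5E_DECLARE_STAT(): stringified name + offsetof() */
    #define DECLARE_STAT(type, fld) { #fld, offsetof(type, fld) }

    static const struct stat_desc demo_desc[] = {
            DECLARE_STAT(struct demo_stats, packets),
            DECLARE_STAT(struct demo_stats, bytes),
            DECLARE_STAT(struct demo_stats, drops),
    };

    int main(void)
    {
            struct demo_stats s = { .packets = 10, .bytes = 1500, .drops = 1 };
            size_t i;

            /* a generic dumper needs only the descriptor table */
            for (i = 0; i < sizeof(demo_desc) / sizeof(demo_desc[0]); i++)
                    printf("%s = %llu\n", demo_desc[i].name, (unsigned long long)
                           *(const uint64_t *)((const char *)&s + demo_desc[i].offset));
            return 0;
    }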
/linux/net/decnet/
  dn_route.c
      327: if (compare_keys(&rth->fld, &rt->fld)) {   [in dn_insert_route()]
      1027: fld.daddr = fld.saddr;   [in dn_route_output_slow()]
      1037: fld.daddr =   [in dn_route_output_slow()]
      1052: le16_to_cpu(fld.daddr), le16_to_cpu(fld.saddr),   [in dn_route_output_slow()]
      1144: fld.saddr = fld.daddr;   [in dn_route_output_slow()]
      1183: memset(&rt->fld, 0, sizeof(rt->fld));   [in dn_route_output_slow()]
      1212: hash = dn_hash(rt->fld.saddr, rt->fld.daddr);   [in dn_route_output_slow()]
      1449: memset(&rt->fld, 0, sizeof(rt->fld));   [in dn_route_input_slow()]
      1464: rt->fld.flowidn_mark = fld.flowidn_mark;   [in dn_route_input_slow()]
      1490: hash = dn_hash(rt->fld.saddr, rt->fld.daddr);   [in dn_route_input_slow()]
      [all …]
  dn_rules.c
      71: struct flowidn *fld = &flp->u.dn;   [in dn_fib_rule_action(), local]
      97: err = tbl->lookup(tbl, fld, (struct dn_fib_res *)arg->result);   [in dn_fib_rule_action()]
      111: struct flowidn *fld = &fl->u.dn;   [in dn_fib_rule_match(), local]
      112: __le16 daddr = fld->daddr;   [in dn_fib_rule_match()]
      113: __le16 saddr = fld->saddr;   [in dn_fib_rule_match()]
      186: struct flowidn fld = { .daddr = addr };   [in dnet_addr_type(), local]
      194: if (!tb->lookup(tb, &fld, &res)) {   [in dnet_addr_type()]
  dn_fib.c
      199: struct flowidn fld;   [in dn_fib_check_nh(), local]
      219: memset(&fld, 0, sizeof(fld));   [in dn_fib_check_nh()]
      220: fld.daddr = nh->nh_gw;   [in dn_fib_check_nh()]
      221: fld.flowidn_oif = nh->nh_oif;   [in dn_fib_check_nh()]
      222: fld.flowidn_scope = r->rtm_scope + 1;   [in dn_fib_check_nh()]
      224: if (fld.flowidn_scope < RT_SCOPE_LINK)   [in dn_fib_check_nh()]
      225: fld.flowidn_scope = RT_SCOPE_LINK;   [in dn_fib_check_nh()]
      227: if ((err = dn_fib_lookup(&fld, &res)) != 0)   [in dn_fib_check_nh()]
      435: if (!fld->flowidn_oif ||   [in dn_fib_semantic_match()]
      436: fld->flowidn_oif == nh->nh_oif)   [in dn_fib_semantic_match()]
      [all …]
  dn_nsp_out.c
      71: struct flowidn fld;   [in dn_nsp_send(), local]
      84: memset(&fld, 0, sizeof(fld));   [in dn_nsp_send()]
      85: fld.flowidn_oif = sk->sk_bound_dev_if;   [in dn_nsp_send()]
      86: fld.saddr = dn_saddr2dn(&scp->addr);   [in dn_nsp_send()]
      87: fld.daddr = dn_saddr2dn(&scp->peer);   [in dn_nsp_send()]
      88: dn_sk_ports_copy(&fld, scp);   [in dn_nsp_send()]
      89: fld.flowidn_proto = DNPROTO_NSP;   [in dn_nsp_send()]
      90: if (dn_route_output_sock(&sk->sk_dst_cache, &fld, sk, 0) == 0) {   [in dn_nsp_send()]
/linux/arch/riscv/kvm/
  vcpu_switch.S
      365: fld f0, KVM_ARCH_FP_D_F0(a0)
      366: fld f1, KVM_ARCH_FP_D_F1(a0)
      367: fld f2, KVM_ARCH_FP_D_F2(a0)
      368: fld f3, KVM_ARCH_FP_D_F3(a0)
      369: fld f4, KVM_ARCH_FP_D_F4(a0)
      370: fld f5, KVM_ARCH_FP_D_F5(a0)
      371: fld f6, KVM_ARCH_FP_D_F6(a0)
      372: fld f7, KVM_ARCH_FP_D_F7(a0)
      373: fld f8, KVM_ARCH_FP_D_F8(a0)
      374: fld f9, KVM_ARCH_FP_D_F9(a0)
      [all …]
/linux/drivers/net/ethernet/mellanox/mlx5/core/diag/
  fs_tracepoint.c
      41: {.m = MLX5_GET(spec, mask, fld),\
      42: .v = MLX5_GET(spec, val, fld)}
      45: {.m = MLX5_GET_BE(type, spec, mask, fld),\
      46: .v = MLX5_GET_BE(type, spec, val, fld)}
      50: (name.m = MLX5_GET(type, mask, fld), \
      51: name.v = MLX5_GET(type, val, fld), \
      66: #define MASK_VAL_L2(type, name, fld) \   [in print_lyr_2_4_hdrs(), argument]
      87: #define MASK_VAL_L2_BE(type, name, fld) \   [in print_lyr_2_4_hdrs(), argument]
      135: MASK_VAL_L2(type, name, fld); \   [in print_lyr_2_4_hdrs()]
      158: #define MASK_VAL_MISC(type, name, fld) \   [in print_misc_parameters_hdrs(), argument]
      [all …]
/linux/scripts/coccinelle/misc/
  doubleinit.cocci
      18: identifier I, s, fld;
      23: struct I s =@p0 { ..., .fld@p = E, ...};
      26: identifier I, s, r.fld;
      31: struct I s =@p0 { ..., .fld@p = E, ...};
      35: fld << r.fld;
      41: cocci.print_main(fld,p0)
      47: fld << r.fld;
      53: msg = "%s: first occurrence line %s, second occurrence line %s" % (fld,ps[0].line,pr[0].line)
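doubleinit.cocci matches structure initializers in which the same field appears twice and reports both line numbers. A compilable C example of the pattern it looks for (the struct is hypothetical); the first value is silently overridden by the second, which GCC also flags with -Woverride-init:

    #include <stdio.h>

    struct pkt_info {
            int len;
            int proto;
    };

    /* .len is initialized twice; only the later value (128) takes effect,
     * which is exactly the double initialization the script reports. */
    static struct pkt_info info = {
            .len = 64,
            .proto = 6,
            .len = 128,
    };

    int main(void)
    {
            printf("len = %d\n", info.len);   /* prints 128 */
            return 0;
    }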
/linux/scripts/gcc-plugins/
  latent_entropy_plugin.c
      161: tree fld, lst = TYPE_FIELDS(type);   [in handle_latent_entropy_attribute(), local]
      164: for (fld = lst; fld; nelt++, fld = TREE_CHAIN(fld)) {   [in handle_latent_entropy_attribute()]
      167: fieldtype = TREE_TYPE(fld);   [in handle_latent_entropy_attribute()]
      173: *node, name, fld);   [in handle_latent_entropy_attribute()]
      177: if (fld)   [in handle_latent_entropy_attribute()]
      182: for (fld = lst; fld; fld = TREE_CHAIN(fld)) {   [in handle_latent_entropy_attribute()]
      183: tree random_const, fld_t = TREE_TYPE(fld);   [in handle_latent_entropy_attribute()]
      186: CONSTRUCTOR_APPEND_ELT(vals, fld, random_const);   [in handle_latent_entropy_attribute()]
/linux/drivers/clk/baikal-t1/
  ccu-pll.c
      385: struct ccu_pll_dbgfs_fld *fld = priv;   [in ccu_pll_dbgfs_fld_set(), local]
      386: struct ccu_pll *pll = fld->pll;   [in ccu_pll_dbgfs_fld_set()]
      390: val = clamp_t(u64, val, fld->min, fld->max);   [in ccu_pll_dbgfs_fld_set()]
      391: data = ((val - 1) << fld->lsb) & fld->mask;   [in ccu_pll_dbgfs_fld_set()]
      394: regmap_update_bits(pll->sys_regs, pll->reg_ctl + fld->reg, fld->mask,   [in ccu_pll_dbgfs_fld_set()]
      427: struct ccu_pll_dbgfs_fld *fld = priv;   [in ccu_pll_dbgfs_fld_get(), local]
      428: struct ccu_pll *pll = fld->pll;   [in ccu_pll_dbgfs_fld_get()]
      431: regmap_read(pll->sys_regs, pll->reg_ctl + fld->reg, &data);   [in ccu_pll_dbgfs_fld_get()]
      432: *val = ((data & fld->mask) >> fld->lsb) + 1;   [in ccu_pll_dbgfs_fld_get()]
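The debugfs set/get pair above clamps a user value, stores it minus one into a register field described by lsb/mask, and reverses the transformation on read. A standalone sketch of that round-trip, with a made-up field layout (bits [7:4]) standing in for the real PLL register:

    #include <stdint.h>
    #include <stdio.h>

    struct pll_fld {
            uint32_t mask;          /* field mask within the register */
            uint32_t lsb;           /* least significant bit of the field */
            uint64_t min, max;      /* valid user-visible range */
    };

    /* encode: clamp, subtract one, shift into place (mirrors _fld_set) */
    static uint32_t fld_encode(const struct pll_fld *fld, uint64_t val)
    {
            if (val < fld->min)
                    val = fld->min;
            if (val > fld->max)
                    val = fld->max;
            return ((uint32_t)(val - 1) << fld->lsb) & fld->mask;
    }

    /* decode: mask, shift down, add one back (mirrors _fld_get) */
    static uint64_t fld_decode(const struct pll_fld *fld, uint32_t reg)
    {
            return ((reg & fld->mask) >> fld->lsb) + 1;
    }

    int main(void)
    {
            struct pll_fld nr = { .mask = 0xf0, .lsb = 4, .min = 1, .max = 16 };
            uint32_t reg = fld_encode(&nr, 8);

            printf("register bits 0x%02x decode back to %llu\n",
                   reg, (unsigned long long)fld_decode(&nr, reg));
            return 0;
    }

Storing value - 1 lets a divider range of 1..16 fit in a 4-bit field, which is why both directions of the conversion carry the +/-1.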
/linux/drivers/net/ethernet/intel/ice/
  ice_flow.c
      916: switch (fld) {   [in ice_flow_xtract_fld()]
      1046: flds[fld].xtrct.prot_id = prot_id;   [in ice_flow_xtract_fld()]
      1047: flds[fld].xtrct.off = (ice_flds_info[fld].off / ese_bits) *   [in ice_flow_xtract_fld()]
      1049: flds[fld].xtrct.disp = (u8)(ice_flds_info[fld].off % ese_bits);   [in ice_flow_xtract_fld()]
      1051: flds[fld].xtrct.mask = ice_flds_info[fld].mask;   [in ice_flow_xtract_fld()]
      1056: cnt = DIV_ROUND_UP(flds[fld].xtrct.disp + ice_flds_info[fld].size,   [in ice_flow_xtract_fld()]
      1060: off = flds[fld].xtrct.off;   [in ice_flow_xtract_fld()]
      1061: mask = flds[fld].xtrct.mask;   [in ice_flow_xtract_fld()]
      1735: u64 bit = BIT_ULL(fld);   [in ice_flow_set_fld_ext()]
      1741: seg->fields[fld].type = field_type;   [in ice_flow_set_fld_ext()]
      [all …]
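ice_flow_xtract_fld() rounds a field's bit offset down to an extraction-word boundary, keeps the remainder as a displacement, and counts how many extraction words the field spans. A short worked example of that arithmetic; the 16-bit word size and the 26-bit-offset/16-bit-wide field are assumed values for illustration only:

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))

    int main(void)
    {
            unsigned int ese_bits = 16;                 /* bits per extraction word */
            unsigned int fld_off = 26, fld_size = 16;   /* hypothetical field */

            unsigned int off = (fld_off / ese_bits) * ese_bits;          /* 16 */
            unsigned int disp = fld_off % ese_bits;                      /* 10 */
            unsigned int cnt = DIV_ROUND_UP(disp + fld_size, ese_bits);  /* 2 words */

            printf("off=%u disp=%u cnt=%u\n", off, disp, cnt);
            return 0;
    }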
/linux/drivers/perf/
  arm_spe_pmu.c
      937: int fld;   [in __arm_spe_pmu_dev_probe(), local]
      944: if (!fld) {   [in __arm_spe_pmu_dev_probe()]
      947: fld, smp_processor_id());   [in __arm_spe_pmu_dev_probe()]
      950: spe_pmu->pmsver = (u16)fld;   [in __arm_spe_pmu_dev_probe()]
      962: spe_pmu->align = 1 << fld;   [in __arm_spe_pmu_dev_probe()]
      965: fld, smp_processor_id());   [in __arm_spe_pmu_dev_probe()]
      991: switch (fld) {   [in __arm_spe_pmu_dev_probe()]
      1015: fld);   [in __arm_spe_pmu_dev_probe()]
      1026: fld, smp_processor_id());   [in __arm_spe_pmu_dev_probe()]
      1031: switch (fld) {   [in __arm_spe_pmu_dev_probe()]
      [all …]
/linux/drivers/power/supply/
  mp2629_charger.c
      172: enum mp2629_field fld,   [in mp2629_get_prop(), argument]
      178: ret = regmap_field_read(charger->regmap_fields[fld], &rval);   [in mp2629_get_prop()]
      182: val->intval = rval * props[fld].step + props[fld].min;   [in mp2629_get_prop()]
      188: enum mp2629_field fld,   [in mp2629_set_prop(), argument]
      193: if (val->intval < props[fld].min || val->intval > props[fld].max)   [in mp2629_set_prop()]
      196: rval = (val->intval - props[fld].min) / props[fld].step;   [in mp2629_set_prop()]
      197: return regmap_field_write(charger->regmap_fields[fld], rval);   [in mp2629_set_prop()]
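mp2629_get_prop() and mp2629_set_prop() convert between a raw register code and a physical property value using a per-field min/step table. A minimal sketch of that linear mapping; the charge-current range used here is an assumed example, not taken from the MP2629 datasheet:

    #include <stdio.h>

    struct prop_range {
            int min;    /* value when the register code is 0 */
            int step;   /* increment per register code */
            int max;    /* largest representable value */
    };

    static int code_to_value(const struct prop_range *p, unsigned int code)
    {
            return code * p->step + p->min;
    }

    static int value_to_code(const struct prop_range *p, int value)
    {
            if (value < p->min || value > p->max)
                    return -1;   /* out of range; the driver's range check rejects these */
            return (value - p->min) / p->step;
    }

    int main(void)
    {
            /* assumed example range: 320000 uA minimum, 80000 uA per step */
            struct prop_range ichg = { .min = 320000, .step = 80000, .max = 4520000 };
            int code = value_to_code(&ichg, 1000000);

            printf("code %d -> %d uA\n", code, code_to_value(&ichg, code));
            return 0;
    }

With these example numbers, a request of 1000000 µA stores as code 8 and reads back as 960000 µA: the integer division truncates set values down to the step grid.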
/linux/include/net/
  dn_fib.h
      88: int (*lookup)(struct dn_fib_table *t, const struct flowidn *fld,
      108: const struct flowidn *fld, struct dn_fib_res *res);
      111: void dn_fib_select_multipath(const struct flowidn *fld, struct dn_fib_res *res);
      127: int dn_fib_lookup(struct flowidn *fld, struct dn_fib_res *res);
  dn_route.h
      68: struct flowidn fld;   [member]
      83: return rt->fld.flowidn_iif != 0;   [in dn_is_input_route()]
      88: return rt->fld.flowidn_iif == 0;   [in dn_is_output_route()]
  dn.h
      190: static inline void dn_sk_ports_copy(struct flowidn *fld, struct dn_scp *scp)   [in dn_sk_ports_copy(), argument]
      192: fld->fld_sport = scp->addrloc;   [in dn_sk_ports_copy()]
      193: fld->fld_dport = scp->addrrem;   [in dn_sk_ports_copy()]
/linux/drivers/net/ethernet/mellanox/mlx5/core/lib/
  tout.c
      107: #define MLX5_TIMEOUT_QUERY(fld, reg_out) \   [argument]
      113: time_field = MLX5_ADDR_OF(dtor_reg, reg_out, fld); \
      120: #define MLX5_TIMEOUT_FILL(fld, reg_out, dev, to_type, to_extra) \   [argument]
      122: u64 fw_to = MLX5_TIMEOUT_QUERY(fld, reg_out); \
/linux/tools/lib/bpf/
  bpf_core_read.h
      44: #define __CORE_BITFIELD_PROBE_READ(dst, src, fld) \   [argument]
      47: __CORE_RELO(src, fld, BYTE_SIZE), \
      48: (const void *)src + __CORE_RELO(src, fld, BYTE_OFFSET))
      54: #define __CORE_BITFIELD_PROBE_READ(dst, src, fld) \   [argument]
      56: (void *)dst + (8 - __CORE_RELO(src, fld, BYTE_SIZE)), \
      57: __CORE_RELO(src, fld, BYTE_SIZE), \
      58: (const void *)src + __CORE_RELO(src, fld, BYTE_OFFSET))
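The two __CORE_BITFIELD_PROBE_READ() definitions differ only in where the probed bytes land inside the u64 destination: the second (big-endian) variant offsets the destination by (8 - BYTE_SIZE) so the value ends up in the low-order bytes. A plain C illustration of why that offset is needed, using memcpy() as a stand-in for bpf_probe_read_kernel():

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            uint32_t src = 0x11223344;   /* 4-byte field to be widened to u64 */
            uint64_t dst = 0;
            size_t sz = sizeof(src);

    #if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
            /* low-order bytes of a u64 sit at its end on big-endian machines */
            memcpy((unsigned char *)&dst + (sizeof(dst) - sz), &src, sz);
    #else
            /* ... and at its start on little-endian machines */
            memcpy(&dst, &src, sz);
    #endif
            /* either way, dst now holds the numeric value of src */
            printf("dst = 0x%llx\n", (unsigned long long)dst);
            return 0;
    }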
/linux/drivers/media/platform/ti-vpe/
  vpdma.h
      200: #define ADB_ADDR(dma_buf, str, fld) ((dma_buf)->addr + offsetof(str, fld))   [argument]
      201: #define MMR_ADB_ADDR(buf, str, fld) ADB_ADDR(&(buf), struct str, fld)   [argument]
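ADB_ADDR() resolves the CPU-visible address of one member inside a DMA buffer that carries a known struct layout: buffer base plus offsetof(). A small user-space sketch of the same idea; adb_block, dma_buf and the register arrays are invented names, and malloc() stands in for the driver's DMA allocation:

    #include <stddef.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct adb_block {
            unsigned int csc_regs[6];
            unsigned int sc_regs[8];
    };

    struct dma_buf {
            void *addr;    /* CPU-visible address of the mapped buffer */
            size_t size;
    };

    /* same shape as ADB_ADDR(): buffer base + offsetof() of the member */
    #define ADB_ADDR(dma_buf, str, fld) ((char *)(dma_buf)->addr + offsetof(str, fld))

    int main(void)
    {
            struct dma_buf buf = { .addr = malloc(sizeof(struct adb_block)),
                                   .size = sizeof(struct adb_block) };
            void *sc;

            if (!buf.addr)
                    return 1;
            sc = ADB_ADDR(&buf, struct adb_block, sc_regs);
            printf("sc_regs lives %zu bytes into the buffer\n",
                   (size_t)((char *)sc - (char *)buf.addr));
            free(buf.addr);
            return 0;
    }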
/linux/drivers/iommu/arm/arm-smmu-v3/
  arm-smmu-v3-sva.c
      395: unsigned long reg, fld;   [in arm_smmu_sva_supported(), local]
      415: fld = cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR0_PARANGE_SHIFT);   [in arm_smmu_sva_supported()]
      416: oas = id_aa64mmfr0_parange_to_phys_shift(fld);   [in arm_smmu_sva_supported()]
      421: fld = cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR0_ASID_SHIFT);   [in arm_smmu_sva_supported()]
      422: asid_bits = fld ? 16 : 8;   [in arm_smmu_sva_supported()]
/linux/drivers/irqchip/
  irq-gic-v4.c
      95: unsigned long fld, reg = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);   [in gic_cpuif_has_vsgi(), local]
      97: fld = cpuid_feature_extract_unsigned_field(reg, ID_AA64PFR0_GIC_SHIFT);   [in gic_cpuif_has_vsgi()]
      99: return fld >= 0x3;   [in gic_cpuif_has_vsgi()]
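This hit, like the SMMU and arm64/mm ones nearby, reads an arm64 ID register and pulls out one 4-bit feature field with cpuid_feature_extract_unsigned_field(). A standalone sketch of what that extraction boils down to; the register value and the 24-bit field position are illustrative, not the arm64 cpufeature API itself:

    #include <stdint.h>
    #include <stdio.h>

    #define ID_FIELD_WIDTH          4
    #define EXAMPLE_GIC_SHIFT       24   /* position of the GIC field in this sketch */

    /* shift-and-mask a 4-bit ID register field, as the cpufeature helper does */
    static unsigned int extract_unsigned_field(uint64_t reg, unsigned int shift)
    {
            return (reg >> shift) & ((1u << ID_FIELD_WIDTH) - 1);
    }

    int main(void)
    {
            uint64_t id_aa64pfr0 = 0x0000000003000011ULL;   /* made-up register value */
            unsigned int fld = extract_unsigned_field(id_aa64pfr0, EXAMPLE_GIC_SHIFT);

            /* gic_cpuif_has_vsgi() above treats a field value >= 3 as vSGI support */
            printf("GIC field = %u, vSGI %s\n", fld,
                   fld >= 0x3 ? "supported" : "not supported");
            return 0;
    }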
/linux/arch/arm64/mm/
  context.c
      45: int fld = cpuid_feature_extract_unsigned_field(read_cpuid(ID_AA64MMFR0_EL1),   [in get_cpu_asid_bits(), local]
      48: switch (fld) {   [in get_cpu_asid_bits()]
      51: smp_processor_id(), fld);   [in get_cpu_asid_bits()]
/linux/scripts/coccinelle/null/
  kmerr.cocci
      23: identifier f,fld;
      27: ... when != x->fld