/linux/drivers/gpu/drm/i915/gt/uc/

intel_guc_ct.c
    17    return container_of(ct, struct intel_guc, ct);   in ct_to_guc()
    27    return ct_to_gt(ct)->i915;   in ct_to_i915()
    32    return &ct_to_i915(ct)->drm;   in ct_to_drm()
    250   GEM_BUG_ON(ct->vma);   in intel_guc_ct_init()
    299   memset(ct, 0, sizeof(*ct));   in intel_guc_ct_fini()
    318   GEM_BUG_ON(!ct->vma);   in intel_guc_ct_enable()
    350   ct->enabled = true;   in intel_guc_ct_enable()
    372   ct->enabled = false;   in intel_guc_ct_disable()
    587   if (unlikely(!h2g_has_room(ct, h2g_dw) || !g2h_has_room(ct, g2h_dw))) {   in has_room_nb()
    1152  struct intel_guc_ct *ct = from_tasklet(ct, t, receive_tasklet);   in ct_receive_tasklet_func() (local)
    [all …]

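The ct_to_guc() and ct_to_i915() matches above recover the enclosing object from an embedded CT member via container_of(). A minimal standalone sketch of that pattern follows; the struct names (guc, guc_ct) and fields are illustrative stand-ins, not the i915 definitions.

```c
/* Standalone sketch of the container_of() back-pointer pattern used by
 * ct_to_guc() above; the struct layout below is illustrative only. */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct guc_ct {            /* stand-in for struct intel_guc_ct */
	int enabled;
};

struct guc {               /* stand-in for struct intel_guc */
	int fw_loaded;
	struct guc_ct ct;  /* CT channel embedded in its owner */
};

/* Given only the embedded member, recover the owning structure. */
static struct guc *ct_to_guc(struct guc_ct *ct)
{
	return container_of(ct, struct guc, ct);
}

int main(void)
{
	struct guc g = { .fw_loaded = 1 };
	struct guc_ct *ct = &g.ct;

	printf("owner fw_loaded=%d\n", ct_to_guc(ct)->fw_loaded);
	return 0;
}
```

The accessor chain (ct_to_guc, ct_to_gt, ct_to_i915) simply composes this lookup with ordinary pointer walks up the device hierarchy.
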
/linux/drivers/video/fbdev/aty/

mach64_ct.c
    268   …ret = par->ref_clk_per * pll->ct.pll_ref_div * pll->ct.vclk_post_div_real / pll->ct.vclk_fb_div / …   in aty_pll_to_var_ct()
    270   if(pll->ct.xres > 0) {   in aty_pll_to_var_ct()
    272   ret /= pll->ct.xres;   in aty_pll_to_var_ct()
    295   pll->ct.pll_ext_cntl, pll->ct.pll_gen_cntl, pll->ct.pll_vclk_cntl);   in aty_set_pll_ct()
    300   pll->ct.pll_ref_div, pll->ct.vclk_post_div, pll->ct.vclk_post_div_real);   in aty_set_pll_ct()
    413   pll->ct.xclk_post_div = pll->ct.pll_ext_cntl & 0x07;   in aty_init_pll_ct()
    436   __func__, pll->ct.mclk_fb_mult, pll->ct.xclk_post_div);   in aty_init_pll_ct()
    489   if (pll->ct.xclkmaxrasdelay <= pll->ct.xclkpagefaultdelay)   in aty_init_pll_ct()
    490   pll->ct.xclkmaxrasdelay = pll->ct.xclkpagefaultdelay + 1;   in aty_init_pll_ct()
    542   pll->ct.mclk_fb_div = q * pll->ct.xclk_post_div_real / 8;   in aty_init_pll_ct()
    [all …]

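The match at line 268 computes a pixel-clock period from the reference-clock period and the PLL divider settings (the final constant divisor is elided by the "…" in the result above and is not reproduced here). A hedged standalone sketch of that kind of calculation follows; pll_to_period, struct pll_ct and the sample divider values are illustrative stand-ins, not the driver's definitions.

```c
/* Standalone sketch of the period arithmetic visible in aty_pll_to_var_ct()
 * above: period ~ ref_clk_per * ref_div * post_div / fb_div, optionally
 * scaled per pixel by xres.  The trailing divisor and other scaling steps
 * elided in the truncated match are deliberately omitted. */
#include <stdio.h>

struct pll_ct {
	unsigned int pll_ref_div;
	unsigned int vclk_fb_div;
	unsigned int vclk_post_div_real;
	unsigned int xres;
};

static unsigned int pll_to_period(unsigned int ref_clk_per,
				  const struct pll_ct *ct)
{
	unsigned int ret;

	ret = ref_clk_per * ct->pll_ref_div * ct->vclk_post_div_real /
	      ct->vclk_fb_div;
	if (ct->xres > 0)
		ret /= ct->xres;   /* per-pixel scaling, as at line 272 */
	return ret;
}

int main(void)
{
	/* Arbitrary sample dividers, for illustration only. */
	struct pll_ct ct = {
		.pll_ref_div = 21, .vclk_fb_div = 61,
		.vclk_post_div_real = 2, .xres = 0,
	};

	printf("period: %u\n", pll_to_period(100, &ct));
	return 0;
}
```
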
/linux/net/netfilter/

nf_conntrack_core.c
    522   pcpu = per_cpu_ptr(nf_ct_net(ct)->ct.pcpu_lists, ct->cpu);   in nf_ct_add_to_dying_list()
    537   pcpu = per_cpu_ptr(nf_ct_net(ct)->ct.pcpu_lists, ct->cpu);   in nf_ct_add_to_unconfirmed_list()
    551   pcpu = per_cpu_ptr(nf_ct_net(ct)->ct.pcpu_lists, ct->cpu);   in nf_ct_del_from_dying_or_unconfirmed_list()
    708   nf_ct_put(ct);   in nf_ct_delete()
    1404  return !nf_ct_is_confirmed(ct) || nf_ct_is_dying(ct);   in gc_worker_skip_ct()
    1575  return ct;   in __nf_conntrack_alloc()
    1667  ct, exp);   in init_conntrack()
    1911  if (!ct) {   in nf_conntrack_in()
    2209  if (!ct)   in nf_conntrack_update()
    2233  if (ct) {   in nf_conntrack_get_tuple_skb()
    [all …]

nf_conntrack_h323_main.c
    64    struct nf_conn *ct,
    69    struct nf_conn *ct,
    74    struct nf_conn *ct,
    83    struct nf_conn *ct,
    90    struct nf_conn *ct,
    97    struct nf_conn *ct,
    104   struct nf_conn *ct,
    381   struct nf_conn *ct,   in process_h245_channel() (argument)
    782   struct nf_conn *ct,   in expect_callforwarding() (argument)
    805   nf_ct_l3num(ct))) {   in expect_callforwarding()
    [all …]

nf_conntrack_proto_tcp.c
    774   memset(&ct->proto.tcp, 0, sizeof(ct->proto.tcp));   in tcp_new()
    790   memset(&ct->proto.tcp, 0, sizeof(ct->proto.tcp));   in tcp_new()
    866   if (!nf_ct_is_confirmed(ct) && !tcp_new(ct, skb, dataoff, th))   in nf_conntrack_tcp_packet()
    869   spin_lock_bh(&ct->lock);   in nf_conntrack_tcp_packet()
    905   if (nf_ct_kill(ct))   in nf_conntrack_tcp_packet()
    936   ct->proto.tcp.seen[ct->proto.tcp.last_dir].td_end =   in nf_conntrack_tcp_packet()
    938   ct->proto.tcp.seen[ct->proto.tcp.last_dir].td_maxend =   in nf_conntrack_tcp_packet()
    940   ct->proto.tcp.seen[ct->proto.tcp.last_dir].td_maxwin =   in nf_conntrack_tcp_packet()
    943   ct->proto.tcp.seen[ct->proto.tcp.last_dir].td_scale =   in nf_conntrack_tcp_packet()
    946   ct->proto.tcp.seen[ct->proto.tcp.last_dir].flags =   in nf_conntrack_tcp_packet()
    [all …]

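These matches show the per-connection TCP state (ct->proto.tcp) being reset in tcp_new() and then updated under spin_lock_bh(&ct->lock) in nf_conntrack_tcp_packet(). The following kernel-style sketch shows only that locking shape; sketch_update_tcp_window is a hypothetical helper, not a function in the file, and the real window-tracking math is omitted. It would compile only inside a kernel tree.

```c
/* Kernel-style sketch (not buildable standalone): update per-connection
 * TCP tracking state under ct->lock, in the style of the lines above.
 * Only the locking and the seen[dir] bookkeeping shape are shown. */
#include <net/netfilter/nf_conntrack.h>

static void sketch_update_tcp_window(struct nf_conn *ct,
				     enum ip_conntrack_dir dir,
				     u32 end, u32 maxend, u32 maxwin)
{
	spin_lock_bh(&ct->lock);
	ct->proto.tcp.seen[dir].td_end    = end;
	ct->proto.tcp.seen[dir].td_maxend = maxend;
	ct->proto.tcp.seen[dir].td_maxwin = maxwin;
	spin_unlock_bh(&ct->lock);
}
```
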
nf_nat_sip.c
    45    if (nf_ct_protonum(ct) == IPPROTO_TCP) {   in mangle_packet()
    73    if (nf_ct_l3num(ct) == NFPROTO_IPV4)   in sip_sprintf_addr()
    86    if (nf_ct_l3num(ct) == NFPROTO_IPV4)   in sip_sprintf_addr_port()
    173   if (nf_ct_protonum(ct) == IPPROTO_TCP)   in nf_nat_sip()
    189   &ct->tuplehash[dir].tuple.src.u3) ||   in nf_nat_sip()
    194   &ct->tuplehash[dir].tuple.dst.u3) ||   in nf_nat_sip()
    215   buflen = sip_sprintf_addr(ct, buffer,   in nf_nat_sip()
    216   &ct->tuplehash[!dir].tuple.dst.u3,   in nf_nat_sip()
    232   buflen = sip_sprintf_addr(ct, buffer,   in nf_nat_sip()
    233   &ct->tuplehash[!dir].tuple.src.u3,   in nf_nat_sip()
    [all …]

nf_conntrack_proto_dccp.c
    491   if (!nf_ct_is_confirmed(ct) && !dccp_new(ct, skb, dh, state))   in nf_conntrack_dccp_packet()
    501   spin_lock_bh(&ct->lock);   in nf_conntrack_dccp_packet()
    545   spin_unlock_bh(&ct->lock);   in nf_conntrack_dccp_packet()
    549   spin_unlock_bh(&ct->lock);   in nf_conntrack_dccp_packet()
    554   ct->proto.dccp.last_dir = dir;   in nf_conntrack_dccp_packet()
    557   spin_unlock_bh(&ct->lock);   in nf_conntrack_dccp_packet()
    597   spin_lock_bh(&ct->lock);   in dccp_to_nlattr()
    615   spin_unlock_bh(&ct->lock);   in dccp_to_nlattr()
    620   spin_unlock_bh(&ct->lock);   in dccp_to_nlattr()
    658   spin_lock_bh(&ct->lock);   in nlattr_to_dccp()
    [all …]

xt_CT.c
    26    if (ct) {   in xt_ct_target()
    40    struct nf_conn *ct = info->ct;   in xt_ct_target_v0() (local)
    49    struct nf_conn *ct = info->ct;   in xt_ct_target_v1() (local)
    144   struct nf_conn *ct;   in xt_ct_tg_check() (local)
    148   ct = NULL;   in xt_ct_tg_check()
    170   if (!ct) {   in xt_ct_tg_check()
    206   info->ct = ct;   in xt_ct_tg_check()
    241   info->ct = info_v1.ct;   in xt_ct_tg_check_v0()
    269   struct nf_conn *ct = info->ct;   in xt_ct_tg_destroy() (local)
    272   if (ct) {   in xt_ct_tg_destroy()
    [all …]

nf_conntrack_netlink.c
    715   struct nf_conn *ct = item->ct;   in ctnetlink_conntrack_event() (local)
    1626  nf_ct_put(ct);   in ctnetlink_del_conntrack()
    1686  nf_ct_put(ct);   in ctnetlink_get_conntrack()
    2350  memset(&ct->proto, 0, sizeof(ct->proto));   in ctnetlink_create_conntrack()
    2398  return ct;   in ctnetlink_create_conntrack()
    2501  nf_ct_put(ct);   in ctnetlink_new_conntrack()
    2740  if (ct->secmark && ctnetlink_dump_secctx(skb, ct) < 0)   in __ctnetlink_glue_build()
    2743  if (ct->master && ctnetlink_dump_master(skb, ct) < 0)   in __ctnetlink_glue_build()
    2754  if (ct->mark && ctnetlink_dump_mark(skb, ct) < 0)   in __ctnetlink_glue_build()
    3302  c.data = ct;   in ctnetlink_dump_exp_ct()
    [all …]

nf_nat_core.c
    123   const struct nf_conn *ct;   in __nf_nat_decode_session() (local)
    130   if (ct == NULL)   in __nf_nat_decode_session()
    133   family = nf_ct_l3num(ct);   in __nf_nat_decode_session()
    408   if (!ct->master)   in nf_nat_l4proto_unique_tuple()
    507   struct nf_conn *ct,   in get_unique_tuple() (argument)
    513   zone = nf_ct_zone(ct);   in get_unique_tuple()
    620   if (nfct_help(ct) && !nfct_seqadj(ct))   in nf_nat_setup_info()
    716   struct nf_conn *ct;   in nf_nat_inet_fn() (local)
    731   nat = nfct_nat(ct);   in nf_nat_inet_fn()
    765   ct, ct->status);   in nf_nat_inet_fn()
    [all …]

nf_conntrack_sip.c
    150   if (!ct)   in sip_parse_addr()
    154   switch (nf_ct_l3num(ct)) {   in sip_parse_addr()
    641   *proto = nf_ct_protonum(ct);   in ct_sip_parse_transport()
    654   switch (nf_ct_l3num(ct)) {   in sdp_parse_addr()
    878   switch (nf_ct_l3num(ct)) {   in set_expected_rtp_rtcp()
    1106  nf_ct_helper_log(skb, ct,   in process_sdp()
    1148  flush_expectations(ct, true);   in process_invite_response()
    1165  flush_expectations(ct, true);   in process_update_response()
    1196  flush_expectations(ct, true);   in process_invite_request()
    1211  flush_expectations(ct, true);   in process_bye_request()
    [all …]

nf_conntrack_proto_sctp.c
    280   memset(&ct->proto.sctp, 0, sizeof(ct->proto.sctp));   in sctp_new()
    386   if (!nf_ct_is_confirmed(ct)) {   in nf_conntrack_sctp_packet()
    411   spin_lock_bh(&ct->lock);   in nf_conntrack_sctp_packet()
    498   spin_unlock_bh(&ct->lock);   in nf_conntrack_sctp_packet()
    521   spin_unlock_bh(&ct->lock);   in nf_conntrack_sctp_packet()
    528   switch (ct->proto.sctp.state) {   in sctp_can_early_drop()
    550   spin_lock_bh(&ct->lock);   in sctp_to_nlattr()
    568   spin_unlock_bh(&ct->lock);   in sctp_to_nlattr()
    574   spin_unlock_bh(&ct->lock);   in sctp_to_nlattr()
    609   spin_lock_bh(&ct->lock);   in nlattr_to_sctp()
    [all …]

nf_conntrack_ecache.c
    66    e = nf_ct_ecache_find(ct);   in ecache_work_evict_list()
    80    refs[evicted] = ct;   in ecache_work_evict_list()
    138   struct nf_conn *ct = item->ct;   in __nf_conntrack_eventmask_report() (local)
    160   spin_lock_bh(&ct->lock);   in __nf_conntrack_eventmask_report()
    165   spin_unlock_bh(&ct->lock);   in __nf_conntrack_eventmask_report()
    181   e = nf_ct_ecache_find(ct);   in nf_conntrack_eventmask_report()
    187   item.ct = ct;   in nf_conntrack_eventmask_report()
    216   if (!nf_ct_is_confirmed(ct) || nf_ct_is_dying(ct))   in nf_ct_deliver_cached_events()
    219   e = nf_ct_ecache_find(ct);   in nf_ct_deliver_cached_events()
    225   item.ct = ct;   in nf_ct_deliver_cached_events()
    [all …]

nf_conntrack_pptp.c
    127   nf_nat_pptp_expectfn(ct, exp);   in pptp_expectfn()
    160   zone = nf_ct_zone(ct);   in destroy_sibling_or_exp()
    189   nf_ct_gre_keymap_destroy(ct);   in pptp_destroy_siblings()
    227   nf_ct_l3num(ct),   in exp_gre()
    236   nf_ct_l3num(ct),   in exp_gre()
    278   struct nf_conn *ct,   in pptp_inbound_pkt() (argument)
    328   exp_gre(ct, cid, pcid);   in pptp_inbound_pkt()
    362   exp_gre(ct, cid, pcid);   in pptp_inbound_pkt()
    372   pptp_destroy_siblings(ct);   in pptp_inbound_pkt()
    406   struct nf_conn *ct,   in pptp_outbound_pkt() (argument)
    [all …]

xt_conntrack.c
    40    conntrack_mt_origsrc(const struct nf_conn *ct,   in conntrack_mt_origsrc() (argument)
    49    conntrack_mt_origdst(const struct nf_conn *ct,   in conntrack_mt_origdst() (argument)
    83    (nf_ct_protonum(ct) == info->l4proto) ^   in ct_proto_port_check()
    121   const struct nf_conn *ct)   in ct_proto_port_check_v3() (argument)
    167   const struct nf_conn *ct;   in conntrack_mt() (local)
    170   ct = nf_ct_get(skb, &ctinfo);   in conntrack_mt()
    172   if (ct)   in conntrack_mt()
    180   if (ct != NULL) {   in conntrack_mt()
    191   if (ct == NULL)   in conntrack_mt()
    219   if (!ct_proto_port_check(info, ct))   in conntrack_mt()
    [all …]

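The conntrack_mt() matches start by fetching the conntrack entry attached to the skb with nf_ct_get() and bailing out when there is none. The kernel-style sketch below shows only that lookup-and-check shape; sketch_has_confirmed_ct is a hypothetical helper, the real match logic is omitted, and the snippet compiles only in a kernel tree.

```c
/* Kernel-style sketch: fetch the conntrack entry attached to an skb,
 * the way conntrack_mt() above begins.  Not buildable standalone. */
#include <linux/skbuff.h>
#include <net/netfilter/nf_conntrack.h>

static bool sketch_has_confirmed_ct(const struct sk_buff *skb)
{
	enum ip_conntrack_info ctinfo;
	const struct nf_conn *ct;

	ct = nf_ct_get(skb, &ctinfo);
	if (!ct)
		return false;		/* untracked packet */

	return nf_ct_is_confirmed(ct);
}
```
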
nft_ct.c
    76    if (ct)   in nft_ct_get_eval()
    88    if (ct == NULL)   in nft_ct_get_eval()
    96    *dest = ct->status;   in nft_ct_get_eval()
    100   *dest = ct->mark;   in nft_ct_get_eval()
    267   if (!ct) {   in nft_ct_set_zone_eval()
    291   if (ct == NULL || nf_ct_is_template(ct))   in nft_ct_set_eval()
    355   if (!ct)   in nft_ct_tmpl_put_pcpu()
    357   nf_ct_put(ct);   in nft_ct_tmpl_put_pcpu()
    842   if (!ct || nf_ct_is_template(ct) || nf_ct_is_confirmed(ct))   in nft_ct_timeout_obj_eval()
    1075  if (!ct ||   in nft_ct_helper_obj_eval()
    [all …]

/linux/drivers/macintosh/

windfarm.h
    30    s32 (*get_min)(struct wf_control *ct);
    31    s32 (*get_max)(struct wf_control *ct);
    32    void (*release)(struct wf_control *ct);
    63    s32 vmax = ct->ops->get_max(ct);   in wf_control_set_max()
    64    return ct->ops->set_value(ct, vmax);   in wf_control_set_max()
    69    s32 vmin = ct->ops->get_min(ct);   in wf_control_set_min()
    70    return ct->ops->set_value(ct, vmin);   in wf_control_set_min()
    75    return ct->ops->set_value(ct, val);   in wf_control_set()
    80    return ct->ops->get_value(ct, val);   in wf_control_get()
    85    return ct->ops->get_min(ct);   in wf_control_get_min()
    [all …]

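The wf_control_set_max()/set_min() wrappers shown above are a plain ops-table (vtable) pattern: a generic helper reads a limit through the control's ops and writes it back through set_value(). A standalone sketch of the same shape follows; struct ctrl, struct ctrl_ops and the fan_* backend are simplified stand-ins, not the windfarm definitions.

```c
/* Standalone sketch of the ops-table pattern behind wf_control_set_max():
 * names and types are simplified stand-ins. */
#include <stdio.h>
#include <stdint.h>

struct ctrl;

struct ctrl_ops {
	int32_t (*get_min)(struct ctrl *ct);
	int32_t (*get_max)(struct ctrl *ct);
	int     (*set_value)(struct ctrl *ct, int32_t val);
};

struct ctrl {
	const struct ctrl_ops *ops;
	int32_t value;
};

/* Generic helper: pin the control at its maximum, whatever the backend. */
static int ctrl_set_max(struct ctrl *ct)
{
	int32_t vmax = ct->ops->get_max(ct);
	return ct->ops->set_value(ct, vmax);
}

/* One concrete backend. */
static int32_t fan_get_min(struct ctrl *ct) { (void)ct; return 0; }
static int32_t fan_get_max(struct ctrl *ct) { (void)ct; return 100; }
static int fan_set_value(struct ctrl *ct, int32_t val)
{
	ct->value = val;
	return 0;
}

static const struct ctrl_ops fan_ops = {
	.get_min   = fan_get_min,
	.get_max   = fan_get_max,
	.set_value = fan_set_value,
};

int main(void)
{
	struct ctrl fan = { .ops = &fan_ops };

	ctrl_set_max(&fan);
	printf("fan value: %d\n", fan.value);
	return 0;
}
```

The design point is that wf_control_set_max() never needs to know which driver backs the control; only the ops table does.
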
/linux/include/net/netfilter/

nf_conntrack.h
    141   #define nf_ct_tuple(ct, dir) (&(ct)->tuplehash[dir].tuple)   (argument)
    150   return read_pnet(&ct->ct_net);   in nf_ct_net()
    175   WARN_ON(!ct);   in nf_ct_put()
    176   nf_conntrack_put(&ct->ct_general);   in nf_ct_put()
    228   return nf_ct_delete(ct, 0, 0);   in nf_ct_kill()
    245   void nf_conntrack_free(struct nf_conn *ct);
    265   return test_bit(IPS_DYING_BIT, &ct->status);   in nf_ct_is_dying()
    292   return nf_ct_is_expired(ct) && nf_ct_is_confirmed(ct) &&   in nf_ct_should_gc()
    293   !nf_ct_is_dying(ct);   in nf_ct_should_gc()
    304   if (nf_ct_expires(ct) < NF_CT_DAY / 2)   in nf_ct_offload_timeout()
    [all …]

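The nf_ct_is_dying()/nf_ct_should_gc() matches combine three per-entry conditions: an entry is garbage-collectable only when it is expired and confirmed but not yet dying. A standalone sketch of that predicate over an illustrative status bitmask follows; struct conn, the STATUS_* bits and the timeout field are placeholders, not the kernel's IPS_* flags or jiffies handling.

```c
/* Standalone sketch of the nf_ct_should_gc() predicate shape seen above.
 * Flag bits and the expiry field are illustrative placeholders. */
#include <stdbool.h>
#include <stdio.h>

#define STATUS_CONFIRMED  (1u << 0)   /* placeholder for IPS_CONFIRMED */
#define STATUS_DYING      (1u << 1)   /* placeholder for IPS_DYING */

struct conn {
	unsigned int status;
	long timeout;              /* jiffies-style deadline */
};

static bool conn_is_expired(const struct conn *ct, long now)
{
	return ct->timeout - now <= 0;
}

/* GC only reclaims entries that are expired, were confirmed into the
 * table, and are not already being torn down elsewhere. */
static bool conn_should_gc(const struct conn *ct, long now)
{
	return conn_is_expired(ct, now) &&
	       (ct->status & STATUS_CONFIRMED) &&
	       !(ct->status & STATUS_DYING);
}

int main(void)
{
	struct conn c = { .status = STATUS_CONFIRMED, .timeout = 100 };

	printf("gc at t=50:  %d\n", conn_should_gc(&c, 50));   /* 0 */
	printf("gc at t=150: %d\n", conn_should_gc(&c, 150));  /* 1 */
	return 0;
}
```
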
nf_conntrack_ecache.h
    32    nf_ct_ecache_find(const struct nf_conn *ct)   in nf_ct_ecache_find() (argument)
    35    return nf_ct_ext_find(ct, NF_CT_EXT_ECACHE);   in nf_ct_ecache_find()
    45    struct net *net = nf_ct_net(ct);   in nf_ct_ecache_ext_add()
    55    e = nf_ct_ext_add(ct, NF_CT_EXT_ECACHE, gfp);   in nf_ct_ecache_ext_add()
    70    struct nf_conn *ct;   (member)
    101   struct nf_conn *ct,   in nf_conntrack_eventmask_report() (argument)
    114   struct net *net = nf_ct_net(ct);   in nf_conntrack_event_cache()
    120   e = nf_ct_ecache_find(ct);   in nf_conntrack_event_cache()
    133   const struct net *net = nf_ct_net(ct);   in nf_conntrack_event_report()
    148   const struct net *net = nf_ct_net(ct);   in nf_conntrack_event()
    [all …]

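The nf_ct_ecache_find() match shows the event cache living in a conntrack extension, looked up with nf_ct_ext_find(). A short kernel-style sketch of checking for that extension follows; sketch_ct_has_ecache is a hypothetical helper and compiles only in a kernel tree.

```c
/* Kernel-style sketch: look up the event-cache extension before using it,
 * mirroring nf_ct_ecache_find() above.  Not buildable standalone. */
#include <net/netfilter/nf_conntrack_ecache.h>

static bool sketch_ct_has_ecache(const struct nf_conn *ct)
{
	const struct nf_conntrack_ecache *e;

	e = nf_ct_ext_find(ct, NF_CT_EXT_ECACHE);
	return e != NULL;
}
```
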
nf_conntrack_l4proto.h
    35    struct nf_conn *ct, bool destroy);
    164   const struct nf_conn *ct,
    180   const struct nf_conn *ct,   in nf_ct_l4proto_log_invalid() (argument)
    188   return &net->ct.nf_ct_proto.generic;   in nf_generic_pernet()
    193   return &net->ct.nf_ct_proto.tcp;   in nf_tcp_pernet()
    198   return &net->ct.nf_ct_proto.udp;   in nf_udp_pernet()
    203   return &net->ct.nf_ct_proto.icmp;   in nf_icmp_pernet()
    208   return &net->ct.nf_ct_proto.icmpv6;   in nf_icmpv6_pernet()
    229   return &net->ct.nf_ct_proto.dccp;   in nf_dccp_pernet()
    236   return &net->ct.nf_ct_proto.sctp;   in nf_sctp_pernet()
    [all …]

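The nf_tcp_pernet()/nf_udp_pernet() matches are one-line accessors that pull per-protocol tracking state out of the per-namespace ct area (net->ct). The kernel-style sketch below just restates that accessor shape; sketch_tcp_pernet is a hypothetical copy of what line 193 shows and compiles only in a kernel tree.

```c
/* Kernel-style sketch of the per-netns accessor shape seen above:
 * each helper returns a sub-structure of net->ct.  Not standalone. */
#include <net/net_namespace.h>
#include <net/netns/conntrack.h>

static inline struct nf_tcp_net *sketch_tcp_pernet(struct net *net)
{
	return &net->ct.nf_ct_proto.tcp;
}
```
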
/linux/net/netfilter/ipvs/

ip_vs_nfct.c
    77    if (ct == NULL || nf_ct_is_confirmed(ct) ||   in ip_vs_update_conntrack()
    78    nf_ct_is_dying(ct))   in ip_vs_update_conntrack()
    95    !nfct_seqadj(ct) && !nfct_seqadj_ext_add(ct))   in ip_vs_update_conntrack()
    121   __func__, ct, ct->status, ctinfo,   in ip_vs_update_conntrack()
    125   __func__, ct, ct->status, ctinfo,   in ip_vs_update_conntrack()
    166   __func__, ct, ct->status, ARG_CONN(cp));   in ip_vs_nfct_expect_callback()
    182   __func__, ct, ct->status, ARG_CONN(cp));   in ip_vs_nfct_expect_callback()
    194   __func__, ct, ct->status, ARG_TUPLE(orig));   in ip_vs_nfct_expect_callback()
    217   if (ct == NULL)   in ip_vs_nfct_expect_related()
    245   struct nf_conn *ct;   in ip_vs_conn_drop_conntrack() (local)
    [all …]

/linux/kernel/irq/

generic-chip.c
    45    *ct->mask_cache &= ~mask;   in irq_gc_mask_disable_reg()
    64    *ct->mask_cache |= mask;   in irq_gc_mask_set_bit()
    65    irq_reg_writel(gc, *ct->mask_cache, ct->regs.mask);   in irq_gc_mask_set_bit()
    84    *ct->mask_cache &= ~mask;   in irq_gc_mask_clr_bit()
    85    irq_reg_writel(gc, *ct->mask_cache, ct->regs.mask);   in irq_gc_mask_clr_bit()
    105   *ct->mask_cache |= mask;   in irq_gc_unmask_enable_reg()
    161   *ct->mask_cache &= ~mask;   in irq_gc_mask_disable_and_ack_set()
    407   ct = gc->chip_types;   in irq_map_generic_chip()
    408   chip = &ct->chip;   in irq_map_generic_chip()
    525   if (ct->type & type) {   in irq_setup_alt_chip()
    [all …]

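These matches update the cached copy of the mask register and write the cache back with irq_reg_writel(). The kernel-style sketch below shows a mask callback in that style, essentially the shape of irq_gc_mask_set_bit() as visible at lines 64 and 65; sketch_gc_mask_set_bit is a hypothetical name, and the snippet compiles only inside a kernel tree.

```c
/* Kernel-style sketch of a generic-chip mask callback: update the cached
 * mask under the chip lock, then write the cache to the mask register. */
#include <linux/irq.h>

static void sketch_gc_mask_set_bit(struct irq_data *d)
{
	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
	struct irq_chip_type *ct = irq_data_get_chip_type(d);
	u32 mask = d->mask;

	irq_gc_lock(gc);
	*ct->mask_cache |= mask;                           /* cache first  */
	irq_reg_writel(gc, *ct->mask_cache, ct->regs.mask); /* then hardware */
	irq_gc_unlock(gc);
}
```

Keeping the cache and the register in lockstep is what lets suspend/resume paths and other callbacks reason about the mask state without a register read.
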
/linux/net/openvswitch/

conntrack.c
    153   return ct ? ct->mark : 0;   in ovs_ct_get_mark()
    195   key->ct.mark = ovs_ct_get_mark(ct);   in __ovs_ct_update_key()
    196   ovs_ct_get_labels(ct, &key->ct.labels);   in __ovs_ct_update_key()
    198   if (ct) {   in __ovs_ct_update_key()
    203   ct = ct->master;   in __ovs_ct_update_key()
    244   if (ct) {   in ovs_ct_update_key()
    691   if (!ct)   in skb_nfct_cached()
    694   if (ct)   in skb_nfct_cached()
    990   if (ct) {   in __ovs_ct_lookup()
    1085  if (ct)   in ovs_ct_lookup()
    [all …]

/linux/drivers/irqchip/

irq-brcmstb-l2.c
    85    irq_reg_writel(gc, mask, ct->regs.disable);   in brcmstb_l2_mask_and_ack()
    86    *ct->mask_cache &= ~mask;   in brcmstb_l2_mask_and_ack()
    87    irq_reg_writel(gc, mask, ct->regs.ack);   in brcmstb_l2_mask_and_ack()
    146   if (ct->chip.irq_ack) {   in brcmstb_l2_intc_resume()
    149   ct->regs.ack);   in brcmstb_l2_intc_resume()
    165   struct irq_chip_type *ct;   in brcmstb_l2_intc_of_init() (local)
    229   ct = data->gc->chip_types;   in brcmstb_l2_intc_of_init()
    232   ct->regs.ack = init_params->cpu_clear;   in brcmstb_l2_intc_of_init()
    233   ct->chip.irq_ack = irq_gc_ack_set_bit;   in brcmstb_l2_intc_of_init()
    240   ct->chip.irq_mask = irq_gc_mask_disable_reg;   in brcmstb_l2_intc_of_init()
    [all …]

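The brcmstb_l2_mask_and_ack() lines disable the source, drop it from the mask cache, then ack it, all through the chip-type register block. This complements the generic-chip sketch above; the kernel-style sketch below shows that three-step sequence under the generic-chip lock, with sketch_l2_mask_and_ack as a hypothetical name, and compiles only in-tree.

```c
/* Kernel-style sketch of the mask-and-ack sequence shown above:
 * disable, drop from the mask cache, then ack, under the gc lock. */
#include <linux/irq.h>

static void sketch_l2_mask_and_ack(struct irq_data *d)
{
	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
	struct irq_chip_type *ct = irq_data_get_chip_type(d);
	u32 mask = d->mask;

	irq_gc_lock(gc);
	irq_reg_writel(gc, mask, ct->regs.disable);  /* mask the source   */
	*ct->mask_cache &= ~mask;                    /* keep cache honest */
	irq_reg_writel(gc, mask, ct->regs.ack);      /* clear the latch   */
	irq_gc_unlock(gc);
}
```
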
/linux/net/ax25/

ax25_addr.c
    116   int ct = 0;   in ax25cmp() (local)
    118   while (ct < 6) {   in ax25cmp()
    119   if ((a->ax25_call[ct] & 0xFE) != (b->ax25_call[ct] & 0xFE)) /* Clean off repeater bits */   in ax25cmp()
    121   ct++;   in ax25cmp()
    219   int ct = 0;   in ax25_addr_build() (local)
    252   while (ct < d->ndigi) {   in ax25_addr_build()
    255   if (d->repeated[ct])   in ax25_addr_build()
    265   ct++;   in ax25_addr_build()
    286   int ct;   in ax25_digi_invert() (local)
    292   for (ct = 0; ct < in->ndigi; ct++) {   in ax25_digi_invert()
    [all …]

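The ax25cmp() match compares the six callsign bytes with the low bit masked off, since that bit carries repeater/extension flags rather than address data. A standalone sketch of that comparison loop follows; callsign_cmp and the struct ax25_addr layout here are simplified stand-ins, and the SSID handling that follows in the real function is omitted.

```c
/* Standalone sketch of the ax25cmp() callsign comparison seen above:
 * the low bit of each stored byte carries H/extension flags, so it is
 * masked with 0xFE before comparing.  SSID handling is omitted. */
#include <stdio.h>

struct ax25_addr {
	unsigned char ax25_call[7];   /* 6 shifted callsign bytes + SSID */
};

static int callsign_cmp(const struct ax25_addr *a, const struct ax25_addr *b)
{
	int ct = 0;

	while (ct < 6) {
		/* Clean off repeater/extension bits before comparing. */
		if ((a->ax25_call[ct] & 0xFE) != (b->ax25_call[ct] & 0xFE))
			return 1;	/* callsigns differ */
		ct++;
	}
	return 0;			/* callsigns match */
}

int main(void)
{
	/* AX.25 stores callsigns shifted left one bit. */
	struct ax25_addr a = { { 'G' << 1, '0' << 1, 'A' << 1,
				 'B' << 1, 'C' << 1, ' ' << 1, 0 } };
	struct ax25_addr b = a;

	b.ax25_call[2] |= 0x01;		/* flag bit only: still equal */
	printf("cmp = %d\n", callsign_cmp(&a, &b));
	return 0;
}
```
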