/linux/arch/x86/kernel/cpu/

intel_epb.c
     63  u64 epb;  in intel_epb_save() local
     65  rdmsrl(MSR_IA32_ENERGY_PERF_BIAS, epb);  in intel_epb_save()
     70  this_cpu_write(saved_epb, (epb & EPB_MASK) | EPB_SAVED);  in intel_epb_save()
     78  u64 epb;  in intel_epb_restore() local
     80  rdmsrl(MSR_IA32_ENERGY_PERF_BIAS, epb);  in intel_epb_restore()
     91  val = epb & EPB_MASK;  in intel_epb_restore()
    125  u64 epb;  in energy_perf_bias_show() local
    128  ret = rdmsrl_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS, &epb);  in energy_perf_bias_show()
    132  return sprintf(buf, "%llu\n", epb);  in energy_perf_bias_show()
    140  u64 epb, val;  in energy_perf_bias_store() local
    [all …]

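The entries above save, restore and expose the energy-performance bias kept in the low bits of MSR_IA32_ENERGY_PERF_BIAS, with EPB_MASK selecting the bias field and EPB_SAVED tagging the per-CPU saved copy as valid. Below is a minimal stand-alone sketch of that save/restore bit arithmetic; the concrete values 0x0f and 0x10 are assumptions, not taken from the listing.

/* Sketch: the save/restore bit arithmetic used above. EPB_MASK covers the
 * 4-bit bias field and EPB_SAVED is a flag kept only in the saved per-CPU
 * copy; the exact values here (0x0f, 0x10) are assumptions. */
#include <stdint.h>
#include <stdio.h>

#define EPB_MASK  0x0fULL   /* assumed: energy-performance bias field  */
#define EPB_SAVED 0x10ULL   /* assumed: "a valid value was saved" flag */

int main(void)
{
	uint64_t epb = 0x6;                             /* value read from the MSR  */
	uint64_t saved = (epb & EPB_MASK) | EPB_SAVED;  /* snapshot + validity flag */
	uint64_t restored;

	if (saved & EPB_SAVED)                   /* only restore a real snapshot */
		restored = saved & EPB_MASK;     /* strip the flag before writing back */
	else
		restored = epb;

	printf("saved=0x%llx restored=0x%llx\n",
	       (unsigned long long)saved, (unsigned long long)restored);
	return 0;
}
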
/linux/net/sctp/

proc.c
     86  asoc = sctp_assoc(epb);  in sctp_seq_dump_local_addrs()
    164  struct sctp_ep_common *epb;  in sctp_eps_seq_show() local
    175  ep = sctp_ep(epb);  in sctp_eps_seq_show()
    176  sk = epb->sk;  in sctp_eps_seq_show()
    181  epb->bind_addr.port,  in sctp_eps_seq_show()
    185  sctp_seq_dump_local_addrs(seq, epb);  in sctp_eps_seq_show()
    247  struct sctp_ep_common *epb;  in sctp_assocs_seq_show() local
    261  epb = &assoc->base;  in sctp_assocs_seq_show()
    262  sk = epb->sk;  in sctp_assocs_seq_show()
    274  epb->bind_addr.port,  in sctp_assocs_seq_show()
    [all …]

input.c
    749  struct sctp_ep_common *epb;  in __sctp_hash_endpoint() local
    751  epb = &ep->base;  in __sctp_hash_endpoint()
    752  epb->hashent = sctp_ep_hashfn(net, epb->bind_addr.port);  in __sctp_hash_endpoint()
    753  head = &sctp_ep_hashtable[epb->hashent];  in __sctp_hash_endpoint()
    814  struct sctp_ep_common *epb;  in __sctp_unhash_endpoint() local
    816  epb = &ep->base;  in __sctp_unhash_endpoint()
    818  epb->hashent = sctp_ep_hashfn(sock_net(sk), epb->bind_addr.port);  in __sctp_unhash_endpoint()
    820  head = &sctp_ep_hashtable[epb->hashent];  in __sctp_unhash_endpoint()
    826  hlist_del_init(&epb->node);  in __sctp_unhash_endpoint()
    859  struct sctp_ep_common *epb;  in __sctp_rcv_lookup_endpoint() local
    [all …]

socket.c
   5302  struct sctp_ep_common *epb;  in sctp_for_each_endpoint() local
   5308  sctp_for_each_hentry(epb, &head->chain) {  in sctp_for_each_endpoint()
   5309  err = cb(sctp_ep(epb), p);  in sctp_for_each_endpoint()

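Throughout the SCTP files above, epb is a struct sctp_ep_common: endpoints are hashed into sctp_ep_hashtable by bind port and the chains are walked with sctp_for_each_hentry (defined in include/net/sctp/sctp.h further down). The sketch below is a simplified user-space model of that hash-and-walk pattern; the hash function, table size and singly linked chain are stand-ins, not the kernel's implementation.

/* Sketch only: a user-space model of "hash the endpoint by port, then
 * walk the chain". The node layout and hash function are hypothetical. */
#include <stdio.h>

#define EP_HASHSIZE 64

struct ep_common {               /* stands in for struct sctp_ep_common */
	unsigned short port;     /* stands in for epb->bind_addr.port   */
	struct ep_common *next;  /* stands in for the hlist node        */
};

static struct ep_common *ep_hashtable[EP_HASHSIZE];

static unsigned int ep_hashfn(unsigned short port)
{
	return port % EP_HASHSIZE;          /* hypothetical hash */
}

static void hash_endpoint(struct ep_common *epb)
{
	unsigned int h = ep_hashfn(epb->port);

	epb->next = ep_hashtable[h];        /* insert at chain head */
	ep_hashtable[h] = epb;
}

static struct ep_common *lookup_endpoint(unsigned short port)
{
	struct ep_common *epb;

	for (epb = ep_hashtable[ep_hashfn(port)]; epb; epb = epb->next)
		if (epb->port == port)      /* walk the chain, compare port */
			return epb;
	return NULL;
}

int main(void)
{
	struct ep_common ep = { .port = 9899 };

	hash_endpoint(&ep);
	printf("found: %d\n", lookup_endpoint(9899) != NULL);
	return 0;
}
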
/linux/include/linux/

dqblk_qtree.h
     49  unsigned int epb = info->dqi_usable_bs >> 2;  in qtree_depth() local
     50  unsigned long long entries = epb;  in qtree_depth()
     54  entries *= epb;  in qtree_depth()

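In the quota tree code, epb means "entries per block": a tree block of dqi_usable_bs bytes holds dqi_usable_bs / 4 four-byte block references, and qtree_depth() keeps multiplying by epb until the tree spans the whole id space. A stand-alone sketch of that computation, assuming quota ids are 32-bit (an assumption, not shown in the snippets):

/* Sketch: depth needed so that a tree whose blocks hold `epb` references
 * each can address every 32-bit id. Mirrors the qtree_depth() arithmetic
 * shown above; the 2^32 bound is an assumption (32-bit quota ids). */
#include <stdio.h>

static int tree_depth(unsigned int usable_bs)
{
	unsigned int epb = usable_bs >> 2;      /* 4-byte refs per block */
	unsigned long long entries = epb;
	int depth = 1;

	while (entries < (1ULL << 32)) {        /* grow until all ids fit */
		entries *= epb;
		depth++;
	}
	return depth;
}

int main(void)
{
	printf("depth for 1KiB blocks: %d\n", tree_depth(1024));  /* 256^4 >= 2^32 -> 4 */
	return 0;
}
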
/linux/fs/quota/

quota_tree.c
     28  unsigned int epb = info->dqi_usable_bs >> 2;  in __get_index() local
     32  id /= epb;  in __get_index()
     33  return id % epb;  in __get_index()
    691  unsigned int epb = info->dqi_usable_bs >> 2;  in find_next_id() local
    699  level_inc *= epb;  in find_next_id()
    707  for (i = __get_index(info, *id, depth); i < epb; i++) {  in find_next_id()
    720  if (i == epb) {  in find_next_id()

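__get_index() maps a quota id to the slot to follow at a given tree level by dividing the id by epb once per level still below it and taking the remainder; find_next_id() then scans slots i up to epb. Below is a sketch of that id-to-slot mapping; the block size and depth are example values, not anything fixed by the code above.

/* Sketch: slot of a quota id at each tree level, following the
 * divide/modulo pattern of __get_index() shown above. */
#include <stdio.h>

static unsigned int slot_at_level(unsigned int id, unsigned int epb,
				  int tree_depth, int level)
{
	int below = tree_depth - level - 1;   /* levels still under this one */

	while (below-- > 0)
		id /= epb;                    /* discard lower-level digits */
	return id % epb;                      /* slot within this block */
}

int main(void)
{
	unsigned int epb = 1024 >> 2;         /* 256 refs per 1KiB block */
	unsigned int id = 123456789;
	int depth = 4, level;

	for (level = 0; level < depth; level++)
		printf("level %d -> slot %u\n", level,
		       slot_at_level(id, epb, depth, level));
	return 0;
}
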
/linux/fs/nilfs2/

alloc.c
    733  const unsigned int epb = NILFS_MDT(inode)->mi_entries_per_block;  in nilfs_palloc_freev() local
    762  entry_start = rounddown(group_offset, epb);  in nilfs_palloc_freev()
    781  group_offset < entry_start + epb) {  in nilfs_palloc_freev()
    788  end = entry_start + epb;  in nilfs_palloc_freev()
    800  entry_start = rounddown(group_offset, epb);  in nilfs_palloc_freev()

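For nilfs2, epb is the number of persistent-object entries per metadata block; rounddown(group_offset, epb) gives the first entry of the block containing a given offset, and entry_start + epb is its exclusive end. A tiny illustration of that boundary arithmetic with arbitrary values:

/* Sketch: entry-block boundaries for an offset, as in the rounddown()
 * usage above. epb and group_offset are arbitrary example values. */
#include <stdio.h>

#define rounddown(x, y) (((x) / (y)) * (y))   /* same idea as the kernel macro */

int main(void)
{
	unsigned int epb = 64, group_offset = 200;
	unsigned int entry_start = rounddown(group_offset, epb);   /* 192 */
	unsigned int end = entry_start + epb;                      /* 256 */

	printf("entry %u lives in block entries [%u, %u)\n",
	       group_offset, entry_start, end);
	return 0;
}
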
/linux/fs/ocfs2/

quota_local.c
     58  int epb = ol_quota_entries_per_block(sb);  in ol_dqblk_block() local
     60  return ol_quota_chunk_block(sb, c) + 1 + off / epb;  in ol_dqblk_block()
     65  int epb = ol_quota_entries_per_block(sb);  in ol_dqblk_block_off() local
     67  return (off % epb) * sizeof(struct ocfs2_local_disk_dqblk);  in ol_dqblk_block_off()
     85  int epb = ol_quota_entries_per_block(sb);  in ol_dqblk_chunk_off() local
     88  ol_quota_chunk_block(sb, c) - 1) * epb  in ol_dqblk_chunk_off()
   1103  int epb = ol_quota_entries_per_block(sb);  in ocfs2_extend_local_quota_file() local
   1199  *offset = chunk_blocks * epb;  in ocfs2_extend_local_quota_file()

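The ocfs2 helpers split a chunk-relative entry number off into a block index (off / epb blocks past the chunk header block) and a byte offset inside that block ((off % epb) * entry size). The sketch below shows that mapping; the entry size, entries-per-block value and chunk start used here are made-up example numbers, not ocfs2's real on-disk layout.

/* Sketch: locate local-quota entry `off` within its chunk, following the
 * off / epb and off % epb split shown above. ENTRY_SIZE, the block size
 * and the chunk start block are hypothetical example numbers. */
#include <stdio.h>

#define ENTRY_SIZE 72   /* hypothetical on-disk entry size */

int main(void)
{
	unsigned long chunk_block = 100;     /* first block of the chunk (example) */
	int epb = 4096 / ENTRY_SIZE;         /* entries per block: 56 */
	int off = 130;                       /* chunk-relative entry number */

	unsigned long block = chunk_block + 1 + off / epb;    /* skip chunk header */
	unsigned int block_off = (off % epb) * ENTRY_SIZE;    /* byte offset in block */

	printf("entry %d -> block %lu, offset %u\n", off, block, block_off);
	return 0;
}
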
/linux/tools/power/x86/x86_energy_perf_policy/

x86_energy_perf_policy.c
    896  int epb;  in print_cpu_msrs() local
    898  epb = get_epb(cpu);  in print_cpu_msrs()
    899  if (epb >= 0)  in print_cpu_msrs()
    900  printf("cpu%d: EPB %u\n", cpu, (unsigned int) epb);  in print_cpu_msrs()
   1183  int epb;  in update_cpu_msrs() local
   1186  epb = get_epb(cpu);  in update_cpu_msrs()
   1191  cpu, epb, (unsigned int) new_epb);  in update_cpu_msrs()

/linux/drivers/cpufreq/

intel_pstate.c
    601  u64 epb;  in intel_pstate_get_epb() local
    607  ret = rdmsrl_on_cpu(cpu_data->cpu, MSR_IA32_ENERGY_PERF_BIAS, &epb);  in intel_pstate_get_epb()
    611  return (s16)(epb & 0x0f);  in intel_pstate_get_epb()
    640  u64 epb;  in intel_pstate_set_epb() local
    646  ret = rdmsrl_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS, &epb);  in intel_pstate_set_epb()
    650  epb = (epb & ~0x0f) | pref;  in intel_pstate_set_epb()
    651  wrmsrl_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS, epb);  in intel_pstate_set_epb()

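intel_pstate_set_epb() performs a read-modify-write that replaces only the low four bits of the MSR. The sketch below shows the same pattern from user space through the msr character device; it assumes the msr driver is loaded, root privileges, and that MSR_IA32_ENERGY_PERF_BIAS is MSR 0x1b0. It is an illustration of the pattern, not the driver's code.

/* Sketch: read-modify-write the 4-bit EPB field from user space,
 * mirroring the (epb & ~0x0f) | pref pattern above. Assumes the msr
 * driver is loaded and that MSR_IA32_ENERGY_PERF_BIAS is MSR 0x1b0. */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#define MSR_IA32_ENERGY_PERF_BIAS 0x1b0   /* assumed MSR address */

static int set_epb(int cpu, uint64_t pref)
{
	char path[64];
	uint64_t msr;
	int fd;

	snprintf(path, sizeof(path), "/dev/cpu/%d/msr", cpu);
	fd = open(path, O_RDWR);
	if (fd < 0)
		return -1;

	/* The msr device is addressed by MSR number via the file offset. */
	if (pread(fd, &msr, sizeof(msr), MSR_IA32_ENERGY_PERF_BIAS) != sizeof(msr))
		goto err;
	msr = (msr & ~0x0fULL) | (pref & 0x0f);   /* replace only the bias field */
	if (pwrite(fd, &msr, sizeof(msr), MSR_IA32_ENERGY_PERF_BIAS) != sizeof(msr))
		goto err;
	close(fd);
	return 0;
err:
	close(fd);
	return -1;
}

int main(void)
{
	/* 6 is commonly described as the balanced ("normal") bias value. */
	return set_epb(0, 6) ? 1 : 0;
}
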
/linux/include/net/sctp/

sctp.h
    512  #define sctp_for_each_hentry(epb, head) \  argument
    513          hlist_for_each_entry(epb, head, node)

/linux/fs/ext4/

indirect.c
    569  unsigned epb = inode->i_sb->s_blocksize / sizeof(u32);  in ext4_ind_map_blocks() local
    580  count = count * epb + (epb - offsets[i] - 1);  in ext4_ind_map_blocks()

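In ext4_ind_map_blocks(), epb is the number of 32-bit block pointers an indirect block holds (block size / 4). The accumulation at line 580 walks the remaining levels of the indirect chain, multiplying the running count by epb and adding the pointers to the right of the current level's offset. Below is a stand-alone sketch of just that arithmetic with example values; it is not ext4's full mapping logic.

/* Sketch: the count accumulation shown above. epb is pointers per indirect
 * block (block size / 4); offsets[] are per-level indices for the requested
 * block. Values here are examples. */
#include <stdio.h>

int main(void)
{
	unsigned int epb = 4096 / sizeof(unsigned int);  /* 1024 pointers per 4KiB block */
	unsigned int offsets[] = { 5, 100, 37 };         /* example path through the tree */
	unsigned int depth = 3, i;
	unsigned long long count = 1;

	/* At each deeper level, blocks counted so far fan out by epb, plus the
	 * pointers to the right of this level's offset. */
	for (i = 1; i < depth; i++)
		count = count * epb + (epb - offsets[i] - 1);

	printf("blocks covered from the requested position: %llu\n", count);
	return 0;
}
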
/linux/tools/power/x86/turbostat/

turbostat.c
   1919  int ret, epb = -1;  in get_epb() local
   1928  ret = fscanf(fp, "%d", &epb);  in get_epb()
   1934  return epb;  in get_epb()
   4126  int cpu, epb;  in print_epb() local
   4142  epb = get_epb(cpu);  in print_epb()
   4143  if (epb < 0)  in print_epb()
   4146  switch (epb) {  in print_epb()
   4160  fprintf(outf, "cpu%d: EPB: %d (%s)\n", cpu, epb, epb_string);  in print_epb()

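turbostat's get_epb() reads the per-CPU value with fscanf() rather than accessing the MSR directly, using the sysfs attribute created by intel_epb.c at the top of this listing. The sketch below follows that approach; the sysfs path and the mapping of 0, 6 and 15 to performance, balanced and powersave names are assumptions, not taken from the snippets.

/* Sketch: read a CPU's EPB through sysfs, as turbostat's get_epb() does
 * with fscanf(). Path and value-name mapping are assumptions. */
#include <stdio.h>

static int get_epb(int cpu)
{
	char path[128];
	int epb = -1;
	FILE *fp;

	snprintf(path, sizeof(path),
		 "/sys/devices/system/cpu/cpu%d/power/energy_perf_bias", cpu);
	fp = fopen(path, "r");
	if (!fp)
		return -1;
	if (fscanf(fp, "%d", &epb) != 1)
		epb = -1;
	fclose(fp);
	return epb;
}

int main(void)
{
	int epb = get_epb(0);
	const char *name = "custom";

	if (epb < 0)
		return 1;
	switch (epb) {
	case 0:  name = "performance"; break;   /* assumed value names */
	case 6:  name = "balanced";    break;
	case 15: name = "powersave";   break;
	}
	printf("cpu0: EPB: %d (%s)\n", epb, name);
	return 0;
}
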