/linux/drivers/media/dvb-core/

dvb_ringbuffer.c
    136  size_t split;  in dvb_ringbuffer_read_user() local
    142  buf += split;  in dvb_ringbuffer_read_user()
    143  todo -= split;  in dvb_ringbuffer_read_user()
    162  size_t split;  in dvb_ringbuffer_read() local
    167  buf += split;  in dvb_ringbuffer_read()
    185  size_t split;  in dvb_ringbuffer_write() local
    191  buf += split;  in dvb_ringbuffer_write()
    212  size_t split;  in dvb_ringbuffer_write_user() local
    220  buf += split;  in dvb_ringbuffer_write_user()
    256  size_t split;  in dvb_ringbuffer_pkt_read_user() local
    [all …]

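The hits above are the classic ring-buffer wrap-around pattern: when a read (or write) would run past the end of the backing array, `split` holds the number of bytes up to the end, that much is copied first, and the remainder is copied from the start of the buffer. Below is a minimal user-space sketch of the same idea, with illustrative names rather than the dvb_ringbuffer API.

#include <stddef.h>
#include <stdio.h>
#include <string.h>

/* Minimal ring buffer; names are illustrative, not the dvb_ringbuffer API. */
struct ring {
    unsigned char data[64];
    size_t size;   /* capacity of data[] */
    size_t rpos;   /* current read position */
};

/* Copy @todo bytes out of the ring, splitting the copy when it wraps. */
static void ring_read(struct ring *r, unsigned char *buf, size_t todo)
{
    if (r->rpos + todo > r->size) {
        size_t split = r->size - r->rpos;   /* bytes up to the end */

        memcpy(buf, r->data + r->rpos, split);
        buf += split;
        todo -= split;
        r->rpos = 0;                        /* wrap to the start */
    }
    memcpy(buf, r->data + r->rpos, todo);
    r->rpos += todo;
}

int main(void)
{
    struct ring r = { .size = sizeof(r.data), .rpos = 60 };
    unsigned char out[8];

    memcpy(r.data + 60, "wrap", 4);   /* last 4 bytes of the buffer */
    memcpy(r.data, "okay", 4);        /* continues at the start */
    ring_read(&r, out, 8);            /* split copy: "wrap" then "okay" */
    printf("%.8s\n", out);
    return 0;
}
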
/linux/lib/

sg_split.c
    81   struct sg_splitter *split;  in sg_split_phys() local
    83   for (i = 0, split = splitters; i < nb_splits; i++, split++) {  in sg_split_phys()
    84   in_sg = split->in_sg0;  in sg_split_phys()
    85   out_sg = split->out_sg;  in sg_split_phys()
    89   out_sg->offset += split->skip_sg0;  in sg_split_phys()
    90   out_sg->length -= split->skip_sg0;  in sg_split_phys()
    107  struct sg_splitter *split;  in sg_split_mapped() local
    109  for (i = 0, split = splitters; i < nb_splits; i++, split++) {  in sg_split_mapped()
    110  in_sg = split->in_sg0;  in sg_split_mapped()
    111  out_sg = split->out_sg;  in sg_split_mapped()
    [all …]

/linux/drivers/virtio/

virtio_ring.c
    142   } split;  member
    525   desc = vq->split.vring.desc;  in virtqueue_add_split()
    579   vq->split.desc_extra[prev & (vq->split.vring.num - 1)].flags &=  in virtqueue_add_split()
    615   avail = vq->split.avail_idx_shadow & (vq->split.vring.num - 1);  in virtqueue_add_split()
    621   vq->split.avail_idx_shadow++;  in virtqueue_add_split()
    2217  vq->split.vring = vring;  in __vring_new_virtqueue()
    2231  if (!vq->split.desc_state)  in __vring_new_virtqueue()
    2235  if (!vq->split.desc_extra)  in __vring_new_virtqueue()
    2333  vq->split.vring.desc,  in vring_del_virtqueue()
    2438  ((char *)vq->split.vring.avail - (char *)vq->split.vring.desc);  in virtqueue_get_avail_addr()
    [all …]

/linux/arch/x86/kernel/cpu/

cacheinfo.c
    137  } split;  member
    146  } split;  member
    153  } split;  member
    285  eax->split.is_self_initializing = 1;  in amd_cpuid4()
    286  eax->split.type = types[leaf];  in amd_cpuid4()
    287  eax->split.level = levels[leaf];  in amd_cpuid4()
    288  eax->split.num_threads_sharing = 0;  in amd_cpuid4()
    293  eax->split.is_fully_associative = 1;  in amd_cpuid4()
    615  if (eax.split.type == CTYPE_NULL)  in cpuid4_cache_lookup_regs()
    753  switch (this_leaf.eax.split.level) {  in init_intel_cacheinfo()
    [all …]

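The `} split; member` hits are the tail of a common x86 idiom: a union that overlays a named bitfield view (`split`) on the raw 32-bit value (`full`) of a CPUID register, so code can write `eax.split.type` instead of shifting and masking. A condensed sketch of the pattern against CPUID leaf 4 follows; the field layout is abridged for illustration and is not the authoritative definition from cacheinfo.c.

#include <stdint.h>
#include <stdio.h>
#include <cpuid.h>   /* GCC/Clang __get_cpuid_count() */

/* Overlay a named bitfield view on a raw CPUID register value.  The layout
 * roughly follows CPUID leaf 4 EAX but is abridged for illustration. */
union cpuid4_eax {
    struct {
        uint32_t type:5;                  /* 0 means "no more caches" */
        uint32_t level:3;                 /* 1 = L1, 2 = L2, ... */
        uint32_t is_self_initializing:1;
        uint32_t is_fully_associative:1;
        uint32_t reserved:4;
        uint32_t num_threads_sharing:12;
        uint32_t num_cores_on_die:6;
    } split;
    uint32_t full;
};

int main(void)
{
    unsigned int eax, ebx, ecx, edx;
    union cpuid4_eax leaf;

    for (unsigned int i = 0; __get_cpuid_count(4, i, &eax, &ebx, &ecx, &edx); i++) {
        leaf.full = eax;
        if (leaf.split.type == 0)   /* equivalent of the CTYPE_NULL check */
            break;
        printf("cache index %u: type %u, level %u\n",
               i, leaf.split.type, leaf.split.level);
    }
    return 0;
}
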
/linux/fs/jfs/

jfs_xtree.c
    96    uint split;  member
    597   split.mp = mp;  in xtInsert()
    698   smp = split->mp;  in xtSplitUp()
    724   XT_PUTENTRY(xad, split->flag, split->off, split->len,  in xtSplitUp()
    1051  XT_PUTENTRY(xad, split->flag, split->off, split->len,  in xtSplitPage()
    1122  XT_PUTENTRY(xad, split->flag, split->off, split->len,  in xtSplitPage()
    1147  XT_PUTENTRY(xad, split->flag, split->off, split->len,  in xtSplitPage()
    1287  XT_PUTENTRY(xad, split->flag, split->off, split->len, split->addr);  in xtSplitRoot()
    1420  split.mp = mp;  in xtExtend()
    1585  split.mp = mp;  in xtTailgate()
    [all …]

jfs_dtree.c
    858   split.mp = mp;  in dtInsert()
    860   split.nslot = n;  in dtInsert()
    945   smp = split->mp;  in dtSplitUp()
    1345  smp = split->mp;  in dtSplitPage()
    1450  dtInsertEntry(rp, 0, split->key, split->data, &rdtlck);  in dtSplitPage()
    1588  dtInsertEntry(sp, skip, split->key, split->data, &sdtlck);  in dtSplitPage()
    1608  dtInsertEntry(rp, skip, split->key, split->data, &rdtlck);  in dtSplitPage()
    1656  smp = split->mp;  in dtExtendPage()
    1809  dtInsertEntry(sp, split->index, split->key, split->data, &dtlck);  in dtExtendPage()
    1887  smp = split->mp;  in dtSplitRoot()
    [all …]

/linux/Documentation/vm/

split_page_table_lock.rst
    10  scalability, split page table lock was introduced.
    12  With split page table lock we have separate per-table lock to serialize
    13  access to the table. At the moment we use split lock for PTE and PMD
    35  If split lock is disabled, all tables are guarded by mm->page_table_lock.
    40  Hugetlb and split page table lock
    43  Hugetlb can support several page sizes. We use split lock only for PMD
    49  takes pmd split lock for PMD_SIZE page, mm->page_table_lock
    54  Support of split page table lock by an architecture
    65  PMD split lock only makes sense if you have more than two page table
    83  page->ptl is used to access split page table lock, where 'page' is struct
    [all …]

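As a rough user-space analogue of what this document describes, the sketch below replaces one global mutex with a per-table mutex. It is only meant to show why finer-grained locking scales better when threads touch different tables; it does not model the kernel's page->ptl or pte_lockptr() machinery, and all names are illustrative.

#include <pthread.h>
#include <stdint.h>

#define NTABLES 64

struct table {
    pthread_mutex_t ptl;        /* per-table lock, analogous to page->ptl */
    uint64_t entries[512];
};

static struct table tables[NTABLES];

static void tables_init(void)
{
    for (int i = 0; i < NTABLES; i++)
        pthread_mutex_init(&tables[i].ptl, NULL);
}

static void set_entry(unsigned int t, unsigned int idx, uint64_t val)
{
    /* Only writers of the same table contend here; with a single global
     * lock (the mm->page_table_lock case), every writer of every table
     * would line up on one mutex. */
    pthread_mutex_lock(&tables[t].ptl);
    tables[t].entries[idx] = val;
    pthread_mutex_unlock(&tables[t].ptl);
}
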
/linux/security/apparmor/

lib.c
    50   if (split) {  in aa_split_fqname()
    52   *split++ = 0;  in aa_split_fqname()
    53   if (strncmp(split, "//", 2) == 0)  in aa_split_fqname()
    54   split += 2;  in aa_split_fqname()
    55   name = skip_spaces(split);  in aa_split_fqname()
    100  if (split) {  in aa_splitn_fqname()
    101  *ns_len = split - *ns_name;  in aa_splitn_fqname()
    104  split++;  in aa_splitn_fqname()
    105  if (end - split > 1 && strncmp(split, "//", 2) == 0)  in aa_splitn_fqname()
    106  split += 2;  in aa_splitn_fqname()
    [all …]

policy_ns.c
    203  const char *split;  in __aa_lookupn_ns() local
    205  for (split = strnstr(hname, "//", n); split;  in __aa_lookupn_ns()
    206  split = strnstr(hname, "//", n)) {  in __aa_lookupn_ns()
    207  ns = __aa_findn_ns(&ns->sub_ns, hname, split - hname);  in __aa_lookupn_ns()
    211  n -= split + 2 - hname;  in __aa_lookupn_ns()
    212  hname = split + 2;  in __aa_lookupn_ns()

policy.c
    365  char *split;  in __lookup_parent() local
    369  for (split = strstr(hname, "//"); split;) {  in __lookup_parent()
    371  split - hname);  in __lookup_parent()
    375  hname = split + 2;  in __lookup_parent()
    376  split = strstr(hname, "//");  in __lookup_parent()
    399  const char *split;  in __lookupn_profile() local
    401  for (split = strnstr(hname, "//", n); split;  in __lookupn_profile()
    402  split = strnstr(hname, "//", n)) {  in __lookupn_profile()
    404  split - hname);  in __lookupn_profile()
    409  n -= split + 2 - hname;  in __lookupn_profile()
    [all …]

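All three AppArmor files walk a fully-qualified profile name of the form `parent//child//...` by repeatedly searching for the `//` separator, handling the component before it, and advancing past it. A standalone sketch of that loop using plain strstr() (strnstr() is a kernel helper, not in glibc), with hypothetical names:

#include <stdio.h>
#include <string.h>

/* Walk a hierarchical name such as "parent//child//grandchild", printing one
 * component per "//" separator.  Mirrors the strstr()-based loops in the
 * AppArmor lookup helpers; names here are illustrative. */
static void walk_components(const char *hname)
{
    const char *split;

    for (split = strstr(hname, "//"); split; split = strstr(hname, "//")) {
        printf("component: %.*s\n", (int)(split - hname), hname);
        hname = split + 2;          /* skip the "//" separator */
    }
    printf("leaf: %s\n", hname);    /* whatever follows the last "//" */
}

int main(void)
{
    walk_components("parent//child//grandchild");
    return 0;
}
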
/linux/lib/zstd/compress/

zstd_ldm.c
    264  BYTE const* const split = ip + splits[n] - minMatchLength;  in ZSTD_ldm_fillHashTable() local
    265  U64 const xxhash = xxh64(split, minMatchLength, 0);  in ZSTD_ldm_fillHashTable()
    269  entry.offset = (U32)(split - base);  in ZSTD_ldm_fillHashTable()
    350  BYTE const* const split = ip + splits[n] - minMatchLength;  in ZSTD_ldm_generateSequences_internal() local
    351  U64 const xxhash = xxh64(split, minMatchLength, 0);  in ZSTD_ldm_generateSequences_internal()
    354  candidates[n].split = split;  in ZSTD_ldm_generateSequences_internal()
    364  BYTE const* const split = candidates[n].split;  in ZSTD_ldm_generateSequences_internal() local
    372  newEntry.offset = (U32)(split - base);  in ZSTD_ldm_generateSequences_internal()
    378  if (split < anchor) {  in ZSTD_ldm_generateSequences_internal()
    433  U32 const offset = (U32)(split - base) - bestEntry->offset;  in ZSTD_ldm_generateSequences_internal()
    [all …]

/linux/fs/btrfs/

file.c
    556   if (!split)  in btrfs_drop_extent_cache()
    602   split->block_len = split->len;  in btrfs_drop_extent_cache()
    603   split->orig_block_len = max(split->block_len,  in btrfs_drop_extent_cache()
    607   split->orig_start = split->start;  in btrfs_drop_extent_cache()
    611   split->ram_bytes = split->len;  in btrfs_drop_extent_cache()
    641   split->block_len = split->len;  in btrfs_drop_extent_cache()
    647   split->ram_bytes = split->len;  in btrfs_drop_extent_cache()
    648   split->orig_start = split->start;  in btrfs_drop_extent_cache()
    675   if (split)  in btrfs_drop_extent_cache()
    1103  u64 split;  in btrfs_mark_extent_written() local
    [all …]

/linux/net/sched/

sch_cbq.c
    913  struct cbq_class *split = cl->split;  in cbq_sync_defmap() local
    917  if (split == NULL)  in cbq_sync_defmap()
    936  if (c->split == split && c->level < level &&  in cbq_sync_defmap()
    951  split = cl->split;  in cbq_change_defmap()
    952  if (!split)  in cbq_change_defmap()
    957  if (split == NULL || split->common.classid != splitid) {  in cbq_change_defmap()
    958  for (split = cl->tparent; split; split = split->tparent)  in cbq_change_defmap()
    963  if (split == NULL)  in cbq_change_defmap()
    966  if (cl->split != split) {  in cbq_change_defmap()
    969  cl->split = split;  in cbq_change_defmap()
    [all …]

/linux/Documentation/x86/

buslock.rst
    16  A split lock is any atomic operation whose operand crosses two cache lines.
    20  A bus lock is acquired through either split locked access to writeback (WB)
    29  mechanisms to detect split locks and bus locks.
    31  #AC exception for split lock detection
    34  Beginning with the Tremont Atom CPU split lock operations may raise an
    35  Alignment Check (#AC) exception when a split lock operation is attemped.
    51  |split_lock_detect=|#AC for split lock |#DB for bus lock |
    83  generating split lock and bus lock to block the hard real time code to
    96  Disable checking for split lock and bus lock. This option can be useful if

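To make the quoted definition concrete (a split lock is an atomic operation whose operand crosses two cache lines), the fragment below deliberately performs a locked add on a 4-byte operand that straddles a 64-byte cache-line boundary (x86-64, GNU C, intentionally misaligned access, illustration only). On a kernel booted with split_lock_detect=warn such an access is logged; with =fatal the process is killed with SIGBUS.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* 128 bytes aligned to a cache line, then point 62 bytes in so the
     * 4-byte operand covers bytes 62..65, crossing the 64-byte boundary.
     * The misaligned cast is deliberate; it is the whole point here. */
    static _Alignas(64) uint8_t buf[128];
    volatile uint32_t *p = (volatile uint32_t *)(buf + 62);

    /* A locked read-modify-write on a cache-line-straddling operand is
     * exactly the "split lock" the document describes. */
    __asm__ __volatile__("lock addl $1, %0" : "+m"(*p));
    printf("split-locked add done: %u\n", *p);
    return 0;
}
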
/linux/tools/hv/

vmbus_testing
    154  f_name = f_path.split("/")[-1]
    169  .format(device.split("/")[5]))
    172  .format(device.split("/")[5]))
    209  .format(state_path.split("/")[5]))
    213  .format(state_path.split("/")[5]))
    238  interrupt.split("/")[5]))
    241  message.split("/")[5]))
    263  print("ALL testing now OFF for {}".format(device.split("/")[-1]))

/linux/arch/x86/include/asm/

perf_event.h
    119  } split;  member
    132  } split;  member
    143  } split;  member
    159  } split;  member
    171  } split;  member
    183  } split;  member

/linux/arch/x86/kvm/vmx/

pmu_intel.c
    485  pmu->version = eax.split.version_id;  in intel_pmu_refresh()
    491  pmu->nr_arch_gp_counters = min_t(int, eax.split.num_counters,  in intel_pmu_refresh()
    493  eax.split.bit_width = min_t(int, eax.split.bit_width, x86_pmu.bit_width_gp);  in intel_pmu_refresh()
    494  pmu->counter_bitmask[KVM_PMC_GP] = ((u64)1 << eax.split.bit_width) - 1;  in intel_pmu_refresh()
    495  eax.split.mask_length = min_t(int, eax.split.mask_length, x86_pmu.events_mask_len);  in intel_pmu_refresh()
    497  ((1ull << eax.split.mask_length) - 1);  in intel_pmu_refresh()
    503  min_t(int, edx.split.num_counters_fixed,  in intel_pmu_refresh()
    505  edx.split.bit_width_fixed = min_t(int,  in intel_pmu_refresh()
    506  edx.split.bit_width_fixed, x86_pmu.bit_width_fixed);  in intel_pmu_refresh()
    508  ((u64)1 << edx.split.bit_width_fixed) - 1;  in intel_pmu_refresh()

/linux/scripts/

jobserver-exec
    23  opts = [x for x in flags.split(" ") if x.startswith("--jobserver")]
    26  fds = opts[0].split("=", 1)[1]
    27  reader, writer = [int(x) for x in fds.split(",", 1)]

checkkconfigsymbols.py
    147  split = args.diff.split("..")
    148  commit_a = split[0]
    149  commit_b = split[1]
    200  commit = commit.split(" ", 1)
    241  return [x for x in commits.split("\n") if x]
    447  line = line.split("#")[0]  # ignore comments

/linux/security/apparmor/include/

lib.h
    148  char *split;  in basename() local
    151  for (split = strstr(hname, "//"); split; split = strstr(hname, "//"))  in basename()
    152  hname = split + 2;  in basename()

/linux/Documentation/sphinx/

maintainers_include.py
    122  field, details = line.split(':', 1)
    148  for separated in output.split('\n'):
    163  for separated in field_content.split('\n'):
    183  (path, tail) = os.path.split(path)

kerneldoc.py
    84  export_file_patterns = str(self.options.get('export')).split()
    87  export_file_patterns = str(self.options.get('internal')).split()
    91  identifiers = self.options.get('identifiers').split()
    99  no_identifiers = self.options.get('no-identifiers').split()

/linux/block/

blk-merge.c
    295  goto split;  in blk_bio_segment_split()
    304  goto split;  in blk_bio_segment_split()
    313  split:  in blk_bio_segment_split()
    341  struct bio *split = NULL;  in __blk_queue_split() local
    349  split = blk_bio_write_zeroes_split(q, *bio, &q->bio_split,  in __blk_queue_split()
    353  split = blk_bio_write_same_split(q, *bio, &q->bio_split,  in __blk_queue_split()
    361  if (split) {  in __blk_queue_split()
    363  split->bi_opf |= REQ_NOMERGE;  in __blk_queue_split()
    365  bio_chain(split, *bio);  in __blk_queue_split()
    366  trace_block_split(split, (*bio)->bi_iter.bi_sector);  in __blk_queue_split()
    [all …]

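The __blk_queue_split() hits show the general shape: when a bio exceeds the queue's limits, a front piece that fits is split off and the remainder is chained behind it. A generic byte-range sketch of that carve-off idea follows; the names stand in for bios and queue limits and are not block-layer structures.

#include <stdio.h>

/* Plain ranges stand in for bios; names are illustrative. */
struct io_range {
    unsigned long long sector;   /* start, in 512-byte sectors */
    unsigned int nr_sectors;     /* length, in sectors */
};

static void submit(const struct io_range *r)
{
    printf("submit %llu + %u\n", r->sector, r->nr_sectors);
}

/* If the request exceeds the limit, repeatedly carve off a front piece that
 * honours the limit and submit it, then continue with the remainder. */
static void queue_split_and_submit(struct io_range *r, unsigned int max_sectors)
{
    while (r->nr_sectors > max_sectors) {
        struct io_range split = { r->sector, max_sectors };

        submit(&split);
        r->sector += max_sectors;       /* remainder starts after the piece */
        r->nr_sectors -= max_sectors;
    }
    submit(r);
}

int main(void)
{
    struct io_range r = { 2048, 2600 };

    queue_split_and_submit(&r, 1024);   /* e.g. a 512 KiB per-request limit */
    return 0;
}
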
/linux/fs/nfs/blocklayout/

extent_tree.c
    297  struct pnfs_block_extent *split;  in ext_tree_insert() local
    301  split = kmemdup(new, sizeof(*new), GFP_ATOMIC);  in ext_tree_insert()
    302  if (!split) {  in ext_tree_insert()
    307  split->be_length = be->be_f_offset - split->be_f_offset;  in ext_tree_insert()
    308  split->be_device = nfs4_get_deviceid(new->be_device);  in ext_tree_insert()
    309  __ext_tree_insert(root, split, true);  in ext_tree_insert()
    381  sector_t split)  in ext_tree_split() argument
    390  be->be_length = split - be->be_f_offset;  in ext_tree_split()
    392  new->be_f_offset = split;  in ext_tree_split()

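ext_tree_split() cuts one extent into two adjacent pieces at a given sector: the left piece keeps the original offset with a shortened length, and the right piece starts at the cut and keeps the remainder. A minimal sketch of that arithmetic with illustrative field names (not the pNFS structures):

#include <stdio.h>

struct extent {
    unsigned long long f_offset;   /* start, in sectors */
    unsigned long long length;     /* length, in sectors */
};

/* Split *be, which covers [f_offset, f_offset + length), at 'split';
 * *be becomes the left piece and *right the piece starting at the cut. */
static void extent_split(struct extent *be, struct extent *right,
                         unsigned long long split)
{
    *right = *be;
    be->length = split - be->f_offset;   /* left piece ends at the cut */
    right->f_offset = split;             /* right piece starts there */
    right->length -= be->length;         /* and keeps the remainder */
}

int main(void)
{
    struct extent be = { 100, 50 }, right;

    extent_split(&be, &right, 120);
    printf("left: %llu+%llu, right: %llu+%llu\n",
           be.f_offset, be.length, right.f_offset, right.length);
    return 0;
}
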
/linux/drivers/firewire/

nosy.c
    161  size_t split = end - buffer->head->data;  in packet_buffer_get() local
    163  if (copy_to_user(data, buffer->head->data, split))  in packet_buffer_get()
    165  if (copy_to_user(data + split, buffer->data, length - split))  in packet_buffer_get()
    167  buffer->head = (struct packet *) &buffer->data[length - split];  in packet_buffer_get()
    200  size_t split = end - buffer->tail->data;  in packet_buffer_put() local
    202  memcpy(buffer->tail->data, data, split);  in packet_buffer_put()
    203  memcpy(buffer->data, data + split, length - split);  in packet_buffer_put()
    204  buffer->tail = (struct packet *) &buffer->data[length - split];  in packet_buffer_put()