/linux/fs/btrfs/tests/

inode-tests.c
    265  if (em->block_start != EXTENT_MAP_HOLE) {  in test_btrfs_get_extent()
    404  disk_bytenr = em->block_start;  in test_btrfs_get_extent()
    456  if (em->block_start != disk_bytenr) {  in test_btrfs_get_extent()
    458  disk_bytenr, em->block_start);  in test_btrfs_get_extent()
    519  disk_bytenr = em->block_start;  in test_btrfs_get_extent()
    551  em->block_start);  in test_btrfs_get_extent()
    585  em->block_start);  in test_btrfs_get_extent()
    656  disk_bytenr = em->block_start;  in test_btrfs_get_extent()
    693  if (em->block_start != disk_bytenr) {  in test_btrfs_get_extent()
    695  disk_bytenr, em->block_start);  in test_btrfs_get_extent()
    [all …]

extent-map-tests.c
    27   em->start, em->len, em->block_start,  in free_extent_map_tree()
    70   em->block_start = 0;  in test_case_1()
    112  em->block_start = start;  in test_case_1()
    127  em->block_start, em->block_len);  in test_case_1()
    158  em->block_start = EXTENT_MAP_INLINE;  in test_case_2()
    179  em->block_start = SZ_4K;  in test_case_2()
    241  em->block_start = SZ_4K;  in __test_case_3()
    262  em->block_start = 0;  in __test_case_3()
    282  em->block_start, em->block_len);  in __test_case_3()
    340  em->block_start = 0;  in __test_case_4()
    [all …]

/linux/fs/isofs/

compress.c
    49   int i, block_size = block_end - block_start;  in zisofs_uncompress_block()
    54   int needblocks = (block_size + (block_start & bufmask) + bufmask)  in zisofs_uncompress_block()
    78   blocknum = block_start >> bufshift;  in zisofs_uncompress_block()
    139  (block_start & bufmask);  in zisofs_uncompress_block()
    141  (block_start & bufmask),  in zisofs_uncompress_block()
    144  block_start = 0;  in zisofs_uncompress_block()
    205  loff_t block_start, block_end;  in zisofs_fill_pages()  local
    241  block_start = le32_to_cpu(*(__le32 *)  in zisofs_fill_pages()
    257  if (block_start > block_end) {  in zisofs_fill_pages()
    262  ret = zisofs_uncompress_block(inode, block_start, block_end,  in zisofs_fill_pages()
    [all …]

/linux/fs/reiserfs/

file.c
    173  unsigned block_start, block_end;  in reiserfs_commit_page()  local
    195  for (bh = head = page_buffers(page), block_start = 0;  in reiserfs_commit_page()
    196  bh != head || !block_start;  in reiserfs_commit_page()
    197  block_start = block_end, bh = bh->b_this_page) {  in reiserfs_commit_page()
    201  block_end = block_start + blocksize;  in reiserfs_commit_page()
    202  if (block_end <= from || block_start >= to) {  in reiserfs_commit_page()
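
The reiserfs_commit_page() hits above show the standard per-page buffer walk: the buffer_head list on a page is circular, so the loop condition "bh != head || !block_start" lets the first pass run even though bh starts at head, while block_start/block_end track each buffer's byte range within the page. A minimal sketch of the same idiom with a stand-in node type (toy_bh and count_buffers_in_range are illustrative, not kernel APIs):

    /* Toy stand-in for struct buffer_head: a circular, singly linked list
     * of fixed-size buffers covering one page. */
    struct toy_bh {
        struct toy_bh *b_this_page;  /* next buffer; the last one points back to head */
        unsigned int b_size;         /* bytes covered by this buffer */
    };

    /* Count the buffers that intersect [from, to) within the page.  The list
     * is circular and we start at head, so "stop when bh == head" alone would
     * end the loop immediately; "!block_start" keeps the very first iteration
     * alive, exactly as in the loops quoted above. */
    static unsigned int count_buffers_in_range(struct toy_bh *head,
                                               unsigned int from, unsigned int to)
    {
        struct toy_bh *bh;
        unsigned int block_start, block_end;
        unsigned int n = 0;

        for (bh = head, block_start = 0;
             bh != head || !block_start;
             block_start = block_end, bh = bh->b_this_page) {
            block_end = block_start + bh->b_size;
            if (block_end <= from || block_start >= to)
                continue;  /* this buffer lies outside the range */
            n++;
        }
        return n;
    }

The same loop shape appears in the buffer.c, ocfs2, ext4 and nilfs2 hits further down.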

/linux/fs/cramfs/

inode.c
    831  u32 block_ptr, block_start, block_len;  in cramfs_readpage()  local
    856  cramfs_read(sb, block_start, 2);  in cramfs_readpage()
    857  block_start += 2;  in cramfs_readpage()
    869  block_start = *(u32 *)  in cramfs_readpage()
    874  u32 prev_start = block_start;  in cramfs_readpage()
    878  block_start += PAGE_SIZE;  in cramfs_readpage()
    881  cramfs_read(sb, block_start, 2);  in cramfs_readpage()
    882  block_start += 2 + block_len;  in cramfs_readpage()
    885  block_start &= ~CRAMFS_BLK_FLAGS;  in cramfs_readpage()
    886  block_len = block_ptr - block_start;  in cramfs_readpage()
    [all …]

/linux/kernel/sched/

stats.c
    51  u64 sleep_start, block_start;  in __update_stats_enqueue_sleeper()  local
    54  block_start = schedstat_val(stats->block_start);  in __update_stats_enqueue_sleeper()
    74  if (block_start) {  in __update_stats_enqueue_sleeper()
    75  u64 delta = rq_clock(rq) - block_start;  in __update_stats_enqueue_sleeper()
    83  __schedstat_set(stats->block_start, 0);  in __update_stats_enqueue_sleeper()
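
The __update_stats_enqueue_sleeper() hits show the pattern: a nonzero block_start timestamp means the task had been blocked, the delta against the current runqueue clock is accounted, and the timestamp is cleared. A small sketch of that accounting step under the assumption of a caller-supplied clock; the struct and field names here are hypothetical, not the schedstat API:

    #include <stdint.h>

    /* Hypothetical per-task statistics; the real fields live in
     * struct sched_statistics and are accessed through schedstat helpers. */
    struct blk_stats {
        uint64_t block_start;  /* clock value when the task blocked; 0 = not blocked */
        uint64_t sum_block;    /* total time spent blocked */
        uint64_t block_max;    /* longest single blocked interval */
    };

    /* At wakeup/enqueue time, account the blocked interval that just ended.
     * now is the clock in the same units block_start was recorded in. */
    static void account_block_end(struct blk_stats *st, uint64_t now)
    {
        uint64_t delta;

        if (!st->block_start)
            return;  /* the task was not blocked, nothing to account */

        delta = now - st->block_start;
        if (delta > st->block_max)
            st->block_max = delta;
        st->sum_block += delta;
        st->block_start = 0;  /* interval consumed; clear the start stamp */
    }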

/linux/fs/btrfs/

extent_map.h
    41  u64 block_start;  member
    72  if (em->block_start + em->block_len < em->block_start)  in extent_map_block_end()
    74  return em->block_start + em->block_len;  in extent_map_block_end()

extent_map.c
    214  ASSERT(next->block_start != EXTENT_MAP_DELALLOC &&  in mergable_maps()
    215  prev->block_start != EXTENT_MAP_DELALLOC);  in mergable_maps()
    224  ((next->block_start == EXTENT_MAP_HOLE &&  in mergable_maps()
    225  prev->block_start == EXTENT_MAP_HOLE) ||  in mergable_maps()
    226  (next->block_start == EXTENT_MAP_INLINE &&  in mergable_maps()
    227  prev->block_start == EXTENT_MAP_INLINE) ||  in mergable_maps()
    228  (next->block_start < EXTENT_MAP_LAST_BYTE - 1 &&  in mergable_maps()
    229  next->block_start == extent_map_block_end(prev)))) {  in mergable_maps()
    260  em->block_start = merge->block_start;  in try_merge_map()
    571  if (em->block_start < EXTENT_MAP_LAST_BYTE &&  in merge_extent_mapping()
    [all …]

inode.c
    1091  alloc_hint = em->block_start;  in get_extent_allocation_hint()
    2370  split_pre->block_start = em->block_start;  in split_zoned_em()
    2391  split_mid->block_start = em->block_start + pre;  in split_zoned_em()
    2405  split_post->block_start = em->block_start + em->len - post;  in split_zoned_em()
    5028  u64 block_start;  in btrfs_truncate_block()  local
    5121  btrfs_page_set_dirty(fs_info, page, block_start, block_end + 1 - block_start);  in btrfs_truncate_block()
    7361  em->block_start = hole_em->block_start;  in btrfs_get_extent_fiemap()
    7393  const u64 block_start,  in btrfs_create_dio_extent()  argument
    7735  em->block_start = block_start;  in create_io_em()
    7798  block_start = em->block_start + (start - em->start);  in btrfs_get_blocks_direct_write()
    [all …]

file.c
    597   split->block_start = em->block_start;  in btrfs_drop_extent_cache()
    609   split->block_start = em->block_start;  in btrfs_drop_extent_cache()
    638   split->block_start = em->block_start;  in btrfs_drop_extent_cache()
    642   split->block_start = em->block_start  in btrfs_drop_extent_cache()
    650   split->block_start = em->block_start;  in btrfs_drop_extent_cache()
    2511  hole_em->block_start = EXTENT_MAP_HOLE;  in fill_holes()
    2551  if (em->block_start == EXTENT_MAP_HOLE) {  in find_first_non_hole()
    3196  if (em->block_start == EXTENT_MAP_HOLE)  in btrfs_zero_range_check_range_boundary()
    3263  alloc_hint = em->block_start + em->len;  in btrfs_zero_range()
    3527  if (em->block_start == EXTENT_MAP_HOLE ||  in btrfs_fallocate()
    [all …]

extent_io.c
    2552  logical = em->block_start + logical;  in btrfs_get_io_failure_record()
    2554  logical = em->block_start;  in btrfs_get_io_failure_record()
    3568  u64 block_start;  in btrfs_do_readpage()  local
    3645  disk_bytenr = em->block_start;  in btrfs_do_readpage()
    3648  block_start = em->block_start;  in btrfs_do_readpage()
    3650  block_start = EXTENT_MAP_HOLE;  in btrfs_do_readpage()
    3698  if (block_start == EXTENT_MAP_HOLE) {  in btrfs_do_readpage()
    3725  if (block_start == EXTENT_MAP_INLINE) {  in btrfs_do_readpage()
    3920  u64 block_start;  in __extent_writepage_io()  local
    3985  block_start = em->block_start;  in __extent_writepage_io()
    [all …]
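
In the btrfs hits above, block_start is either the extent's disk offset or one of a few sentinel values (hole, inline, delalloc); extent_map_block_end() computes a wrap-guarded end of the mapping, and mergable_maps() only joins neighbours that are both holes, both inline, or physically contiguous on disk. A rough standalone sketch of those two checks; the struct, the sentinel values and the function names are invented for illustration and do not match the kernel definitions:

    #include <stdbool.h>
    #include <stdint.h>

    /* Stand-in sentinels; the kernel reserves a few values just below
     * U64_MAX (EXTENT_MAP_HOLE, EXTENT_MAP_INLINE, ...) for the same purpose. */
    #define SKETCH_MAP_HOLE   ((uint64_t)-3)
    #define SKETCH_MAP_INLINE ((uint64_t)-2)

    struct sketch_em {
        uint64_t start;        /* logical file offset */
        uint64_t len;          /* length of the mapping */
        uint64_t block_start;  /* disk offset, or a sentinel above */
        uint64_t block_len;    /* length on disk */
    };

    /* Exclusive end of the on-disk range, clamped if start + len wraps,
     * mirroring the extent_map_block_end() hit in extent_map.h. */
    static uint64_t sketch_block_end(const struct sketch_em *em)
    {
        if (em->block_start + em->block_len < em->block_start)
            return (uint64_t)-1;
        return em->block_start + em->block_len;
    }

    /* Two logically adjacent mappings can merge when both describe a hole,
     * both are inline, or next picks up on disk exactly where prev ends. */
    static bool sketch_mergeable(const struct sketch_em *prev,
                                 const struct sketch_em *next)
    {
        if (prev->start + prev->len != next->start)
            return false;  /* not adjacent in the file */

        if (prev->block_start == SKETCH_MAP_HOLE &&
            next->block_start == SKETCH_MAP_HOLE)
            return true;
        if (prev->block_start == SKETCH_MAP_INLINE &&
            next->block_start == SKETCH_MAP_INLINE)
            return true;

        /* otherwise both must be real on-disk extents that are contiguous */
        if (prev->block_start >= SKETCH_MAP_HOLE ||
            next->block_start >= SKETCH_MAP_HOLE)
            return false;
        return next->block_start == sketch_block_end(prev);
    }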

/linux/fs/

buffer.c
    1890  block_start = 0;  in page_zero_new_buffers()
    1911  block_start = block_end;  in page_zero_new_buffers()
    1978  unsigned block_start, block_end;  in __block_write_begin_int()  local
    1995  for(bh = head, block_start = 0; bh != head || !block_start;  in __block_write_begin_int()
    2028  block_start, from);  in __block_write_begin_int()
    2075  block_start = 0;  in __block_commit_write()
    2088  block_start = block_end;  in __block_commit_write()
    2233  block_start = 0;  in block_is_partially_uptodate()
    2244  block_start = block_end;  in block_is_partially_uptodate()
    2639  block_start < PAGE_SIZE;  in nobh_write_begin()
    [all …]

/linux/fs/ocfs2/

aops.c
    423  unsigned block_start, block_end;  in walk_page_buffers()  local
    428  for ( bh = head, block_start = 0;  in walk_page_buffers()
    573  unsigned int block_start)  in ocfs2_should_read_blk()  argument
    606  for (bh = head, block_start = 0; bh != head || !block_start;  in ocfs2_map_page_blocks()
    608  block_end = block_start + bsize;  in ocfs2_map_page_blocks()
    664  block_start = 0;  in ocfs2_map_page_blocks()
    666  block_end = block_start + bsize;  in ocfs2_map_page_blocks()
    669  if (block_start >= to)  in ocfs2_map_page_blocks()
    677  block_start = block_end;  in ocfs2_map_page_blocks()
    886  block_start = 0;  in ocfs2_zero_new_buffers()
    [all …]

/linux/fs/ext4/

move_extent.c
    172  unsigned int blocksize, block_start, block_end;  in mext_page_mkuptodate()  local
    186  for (bh = head, block_start = 0; bh != head || !block_start;  in mext_page_mkuptodate()
    187  block++, block_start = block_end, bh = bh->b_this_page) {  in mext_page_mkuptodate()
    188  block_end = block_start + blocksize;  in mext_page_mkuptodate()
    189  if (block_end <= from || block_start >= to) {  in mext_page_mkuptodate()
    203  zero_user(page, block_start, blocksize);  in mext_page_mkuptodate()

page-io.c
    442  unsigned block_start;  in ext4_bio_write_page()  local
    478  block_start = bh_offset(bh);  in ext4_bio_write_page()
    479  if (block_start >= len) {  in ext4_bio_write_page()

inode.c
    966   unsigned block_start, block_end;  in ext4_walk_page_buffers()  local
    971   for (bh = head, block_start = 0;  in ext4_walk_page_buffers()
    975   block_end = block_start + blocksize;  in ext4_walk_page_buffers()
    1045  unsigned block_start, block_end;  in ext4_block_write_begin()  local
    1065  for (bh = head, block_start = 0; bh != head || !block_start;  in ext4_block_write_begin()
    1067  block_end = block_start + blocksize;  in ext4_block_write_begin()
    1090  block_start, from);  in ext4_block_write_begin()
    1353  unsigned int block_start = 0, block_end;  in ext4_journalled_zero_new_buffers()  local
    1358  block_end = block_start + bh->b_size;  in ext4_journalled_zero_new_buffers()
    1364  start = max(from, block_start);  in ext4_journalled_zero_new_buffers()
    [all …]

/linux/lib/zlib_deflate/

deflate.c
    539  s->block_start = 0L;  in lm_init()
    768  s->block_start -= (long) wsize;  in fill_window()
    832  zlib_tr_flush_block(s, (s->block_start >= 0L ? \
    833  (char *)&s->window[(unsigned)s->block_start] : \
    835  (ulg)((long)s->strstart - s->block_start), \
    837  s->block_start = s->strstart; \
    878  s->block_start >= (long)s->w_size, "slide too late");  in deflate_stored()
    885  Assert(s->block_start >= 0L, "block gone");  in deflate_stored()
    891  max_start = s->block_start + max_block_size;  in deflate_stored()
    901  if (s->strstart - (uInt)s->block_start >= MAX_DIST(s)) {  in deflate_stored()

/linux/drivers/gpu/drm/i915/

i915_buddy.c
    348  u64 block_start;  in i915_buddy_alloc_range()  local
    359  block_start = i915_buddy_block_offset(block);  in i915_buddy_alloc_range()
    360  block_end = block_start + i915_buddy_block_size(mm, block) - 1;  in i915_buddy_alloc_range()
    362  if (!overlaps(start, end, block_start, block_end))  in i915_buddy_alloc_range()
    370  if (contains(start, end, block_start, block_end)) {  in i915_buddy_alloc_range()
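
The i915_buddy_alloc_range() hits compute an inclusive block_end (offset + size - 1) and test it against the requested [start, end] range. A small sketch of overlap/containment predicates for inclusive ranges as those calls appear to use them; the predicate names mirror the hits, but the bodies here are assumed rather than copied from i915:

    #include <stdbool.h>
    #include <stdint.h>

    /* True if inclusive ranges [s1, e1] and [s2, e2] share at least one unit. */
    static bool overlaps(uint64_t s1, uint64_t e1, uint64_t s2, uint64_t e2)
    {
        return s1 <= e2 && s2 <= e1;
    }

    /* True if [s2, e2] lies entirely inside [s1, e1]. */
    static bool contains(uint64_t s1, uint64_t e1, uint64_t s2, uint64_t e2)
    {
        return s1 <= s2 && e2 <= e1;
    }

Using an inclusive end (size - 1) keeps block_end representable even when a block runs up to the very top of the address space, at the cost of requiring a non-zero block size.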

/linux/fs/nilfs2/

page.c
    425  unsigned int block_start, block_end;  in nilfs_page_count_clean_buffers()  local
    429  for (bh = head = page_buffers(page), block_start = 0;  in nilfs_page_count_clean_buffers()
    430  bh != head || !block_start;  in nilfs_page_count_clean_buffers()
    431  block_start = block_end, bh = bh->b_this_page) {  in nilfs_page_count_clean_buffers()
    432  block_end = block_start + bh->b_size;  in nilfs_page_count_clean_buffers()
    433  if (block_end > from && block_start < to && !buffer_dirty(bh))  in nilfs_page_count_clean_buffers()

/linux/arch/arm/mm/

mmu.c
    1158  phys_addr_t block_start, block_end, memblock_limit = 0;  in adjust_lowmem_bounds()  local
    1176  for_each_mem_range(i, &block_start, &block_end) {  in adjust_lowmem_bounds()
    1177  if (!IS_ALIGNED(block_start, PMD_SIZE)) {  in adjust_lowmem_bounds()
    1180  len = round_up(block_start, PMD_SIZE) - block_start;  in adjust_lowmem_bounds()
    1181  memblock_mark_nomap(block_start, len);  in adjust_lowmem_bounds()
    1186  for_each_mem_range(i, &block_start, &block_end) {  in adjust_lowmem_bounds()
    1187  if (block_start < vmalloc_limit) {  in adjust_lowmem_bounds()
    1213  if (!IS_ALIGNED(block_start, PMD_SIZE))  in adjust_lowmem_bounds()
    1214  memblock_limit = block_start;  in adjust_lowmem_bounds()

/linux/drivers/mtd/parsers/

afs.c
    227  u32 block_start;  in afs_parse_v2_partition()  local
    281  block_start = imginfo[20];  in afs_parse_v2_partition()
    287  block_start, block_end);  in afs_parse_v2_partition()

/linux/fs/ntfs/

mft.c
    513  block_start = 0;  in ntfs_sync_mft_mirror()
    517  block_end = block_start + blocksize;  in ntfs_sync_mft_mirror()
    521  if (unlikely(block_start >= m_end))  in ntfs_sync_mft_mirror()
    532  (block_start - m_start);  in ntfs_sync_mft_mirror()
    568  BUG_ON(!nr_bhs && (m_start != block_start));  in ntfs_sync_mft_mirror()
    693  block_start = 0;  in write_mft_record_nolock()
    697  block_end = block_start + blocksize;  in write_mft_record_nolock()
    701  if (unlikely(block_start >= m_end))  in write_mft_record_nolock()
    708  if (block_start == m_start) {  in write_mft_record_nolock()
    725  (block_start - m_start);  in write_mft_record_nolock()
    [all …]

/linux/fs/iomap/

buffered-io.c
    536  iomap_read_page_sync(loff_t block_start, struct page *page, unsigned poff,  in iomap_read_page_sync()  argument
    544  bio.bi_iter.bi_sector = iomap_sector(iomap, block_start);  in iomap_read_page_sync()
    556  loff_t block_start = round_down(pos, block_size);  in __iomap_write_begin()  local
    565  iomap_adjust_read_range(iter->inode, iop, &block_start,  in __iomap_write_begin()
    566  block_end - block_start, &poff, &plen);  in __iomap_write_begin()
    575  if (iomap_block_needs_zeroing(iter, block_start)) {  in __iomap_write_begin()
    580  int status = iomap_read_page_sync(block_start, page,  in __iomap_write_begin()
    586  } while ((block_start += plen) < block_end);  in __iomap_write_begin()
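
The __iomap_write_begin() hits round the write position down to a block boundary and then step block_start forward until it passes block_end, deciding per block whether to zero or read it. A toy version of that walk, assuming a power-of-two block size and len > 0; the helper names are made up:

    #include <stdint.h>

    /* Round x down to a multiple of a power-of-two size. */
    static uint64_t round_down_pow2(uint64_t x, uint64_t size)
    {
        return x & ~(size - 1);
    }

    /* Count the blocks touched by a write of len bytes at pos (len > 0). */
    static unsigned int blocks_touched(uint64_t pos, uint64_t len,
                                       uint64_t block_size)
    {
        uint64_t block_start = round_down_pow2(pos, block_size);
        uint64_t block_end = pos + len;
        unsigned int n = 0;

        do {
            /* the real code decides here whether to zero or read the block */
            n++;
        } while ((block_start += block_size) < block_end);

        return n;
    }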

/linux/fs/jbd2/

journal.c
    1757  unsigned long long phys_block, block_start, block_stop; /* physical */  in __jbd2_journal_erase()  local
    1778  block_start = ~0ULL;  in __jbd2_journal_erase()
    1786  if (block_start == ~0ULL) {  in __jbd2_journal_erase()
    1787  block_start = phys_block;  in __jbd2_journal_erase()
    1788  block_stop = block_start - 1;  in __jbd2_journal_erase()
    1813  byte_start = block_start * journal->j_blocksize;  in __jbd2_journal_erase()
    1815  byte_count = (block_stop - block_start + 1) *  in __jbd2_journal_erase()
    1835  err, block_start, block_stop);  in __jbd2_journal_erase()
    1840  block_start = ~0ULL;  in __jbd2_journal_erase()
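
In the __jbd2_journal_erase() hits, ~0ULL marks "no run of contiguous blocks open yet", block_stop tracks the last block of the current run, and a finished run is turned into a byte range of (block_stop - block_start + 1) blocks. A simplified sketch of that coalescing over a precomputed list of physical block numbers; the helpers are hypothetical, not the jbd2 interface:

    #include <stdint.h>
    #include <stdio.h>

    static void emit_run(uint64_t block_start, uint64_t block_stop,
                         uint64_t blocksize)
    {
        unsigned long long byte_start = block_start * blocksize;
        unsigned long long byte_count = (block_stop - block_start + 1) * blocksize;

        printf("erase %llu bytes at offset %llu\n", byte_count, byte_start);
    }

    /* Coalesce consecutive physical block numbers in phys[0..n-1] into maximal
     * runs and emit one byte range per run, as the erase loop above does. */
    static void erase_contiguous_runs(const uint64_t *phys, unsigned int n,
                                      uint64_t blocksize)
    {
        uint64_t block_start = ~0ULL;  /* ~0ULL means "no run open" */
        uint64_t block_stop = 0;

        for (unsigned int i = 0; i < n; i++) {
            if (block_start != ~0ULL && phys[i] == block_stop + 1) {
                block_stop = phys[i];  /* still contiguous: extend the run */
                continue;
            }
            if (block_start != ~0ULL)
                emit_run(block_start, block_stop, blocksize);  /* gap: flush */
            block_start = phys[i];  /* open a new run at this block */
            block_stop = block_start;
        }
        if (block_start != ~0ULL)
            emit_run(block_start, block_stop, blocksize);  /* flush the tail */
    }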

/linux/drivers/net/ethernet/mellanox/mlxsw/

core_acl_flex_keys.c
    499  int block_start, int block_end)  in mlxsw_afk_clear()  argument
    503  for (i = block_start; i <= block_end; i++)  in mlxsw_afk_clear()