Lines Matching refs:lnum

107 void ubi_eba_get_ldesc(struct ubi_volume *vol, int lnum,  in ubi_eba_get_ldesc()  argument
110 ldesc->lnum = lnum; in ubi_eba_get_ldesc()
111 ldesc->pnum = vol->eba_tbl->entries[lnum].pnum; in ubi_eba_get_ldesc()
211 int lnum) in ltree_lookup() argument
226 if (lnum < le->lnum) in ltree_lookup()
228 else if (lnum > le->lnum) in ltree_lookup()
250 int vol_id, int lnum) in ltree_add_entry() argument
261 le->lnum = lnum; in ltree_add_entry()
264 le1 = ltree_lookup(ubi, vol_id, lnum); in ltree_add_entry()
292 ubi_assert(lnum != le1->lnum); in ltree_add_entry()
293 if (lnum < le1->lnum) in ltree_add_entry()
319 static int leb_read_lock(struct ubi_device *ubi, int vol_id, int lnum) in leb_read_lock() argument
323 le = ltree_add_entry(ubi, vol_id, lnum); in leb_read_lock()
336 static void leb_read_unlock(struct ubi_device *ubi, int vol_id, int lnum) in leb_read_unlock() argument
341 le = ltree_lookup(ubi, vol_id, lnum); in leb_read_unlock()
361 static int leb_write_lock(struct ubi_device *ubi, int vol_id, int lnum) in leb_write_lock() argument
365 le = ltree_add_entry(ubi, vol_id, lnum); in leb_write_lock()
383 static int leb_write_trylock(struct ubi_device *ubi, int vol_id, int lnum) in leb_write_trylock() argument
387 le = ltree_add_entry(ubi, vol_id, lnum); in leb_write_trylock()
412 static void leb_write_unlock(struct ubi_device *ubi, int vol_id, int lnum) in leb_write_unlock() argument
417 le = ltree_lookup(ubi, vol_id, lnum); in leb_write_unlock()
435 bool ubi_eba_is_mapped(struct ubi_volume *vol, int lnum) in ubi_eba_is_mapped() argument
437 return vol->eba_tbl->entries[lnum].pnum >= 0; in ubi_eba_is_mapped()
451 int lnum) in ubi_eba_unmap_leb() argument
458 err = leb_write_lock(ubi, vol_id, lnum); in ubi_eba_unmap_leb()
462 pnum = vol->eba_tbl->entries[lnum].pnum; in ubi_eba_unmap_leb()
467 dbg_eba("erase LEB %d:%d, PEB %d", vol_id, lnum, pnum); in ubi_eba_unmap_leb()
470 vol->eba_tbl->entries[lnum].pnum = UBI_LEB_UNMAPPED; in ubi_eba_unmap_leb()
472 err = ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 0); in ubi_eba_unmap_leb()
475 leb_write_unlock(ubi, vol_id, lnum); in ubi_eba_unmap_leb()
497 static int check_mapping(struct ubi_device *ubi, struct ubi_volume *vol, int lnum, in check_mapping() argument
507 if (!vol->checkmap || test_bit(lnum, vol->checkmap)) in check_mapping()
532 vol->eba_tbl->entries[lnum].pnum = UBI_LEB_UNMAPPED; in check_mapping()
534 ubi_wl_put_peb(ubi, vol->vol_id, lnum, *pnum, torture); in check_mapping()
549 found_lnum = be32_to_cpu(vid_hdr->lnum); in check_mapping()
551 if (found_lnum != lnum || found_vol_id != vol->vol_id) { in check_mapping()
553 *pnum, found_vol_id, found_lnum, vol->vol_id, lnum); in check_mapping()
560 set_bit(lnum, vol->checkmap); in check_mapping()
569 static int check_mapping(struct ubi_device *ubi, struct ubi_volume *vol, int lnum, in check_mapping() argument
595 int ubi_eba_read_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum, in ubi_eba_read_leb() argument
603 err = leb_read_lock(ubi, vol_id, lnum); in ubi_eba_read_leb()
607 pnum = vol->eba_tbl->entries[lnum].pnum; in ubi_eba_read_leb()
609 err = check_mapping(ubi, vol, lnum, &pnum); in ubi_eba_read_leb()
621 len, offset, vol_id, lnum); in ubi_eba_read_leb()
622 leb_read_unlock(ubi, vol_id, lnum); in ubi_eba_read_leb()
629 len, offset, vol_id, lnum, pnum); in ubi_eba_read_leb()
658 pnum, vol_id, lnum); in ubi_eba_read_leb()
686 ubi_assert(lnum < be32_to_cpu(vid_hdr->used_ebs)); in ubi_eba_read_leb()
723 leb_read_unlock(ubi, vol_id, lnum); in ubi_eba_read_leb()
729 leb_read_unlock(ubi, vol_id, lnum); in ubi_eba_read_leb()
748 struct ubi_sgl *sgl, int lnum, int offset, int len, in ubi_eba_read_leb_sg() argument
763 ret = ubi_eba_read_leb(ubi, vol, lnum, in ubi_eba_read_leb_sg()
806 static int try_recover_peb(struct ubi_volume *vol, int pnum, int lnum, in try_recover_peb() argument
866 vol->eba_tbl->entries[lnum].pnum = new_pnum; in try_recover_peb()
872 ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 1); in try_recover_peb()
879 ubi_wl_put_peb(ubi, vol_id, lnum, new_pnum, 1); in try_recover_peb()
902 static int recover_peb(struct ubi_device *ubi, int pnum, int vol_id, int lnum, in recover_peb() argument
916 err = try_recover_peb(vol, pnum, lnum, buf, offset, len, vidb, in recover_peb()
944 static int try_write_vid_and_data(struct ubi_volume *vol, int lnum, in try_write_vid_and_data() argument
957 opnum = vol->eba_tbl->entries[lnum].pnum; in try_write_vid_and_data()
960 len, offset, vol_id, lnum, pnum); in try_write_vid_and_data()
965 vol_id, lnum, pnum); in try_write_vid_and_data()
974 len, offset, vol_id, lnum, pnum); in try_write_vid_and_data()
979 vol->eba_tbl->entries[lnum].pnum = pnum; in try_write_vid_and_data()
985 err = ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 1); in try_write_vid_and_data()
987 err = ubi_wl_put_peb(ubi, vol_id, lnum, opnum, 0); in try_write_vid_and_data()
1007 int ubi_eba_write_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum, in ubi_eba_write_leb() argument
1017 err = leb_write_lock(ubi, vol_id, lnum); in ubi_eba_write_leb()
1021 pnum = vol->eba_tbl->entries[lnum].pnum; in ubi_eba_write_leb()
1023 err = check_mapping(ubi, vol, lnum, &pnum); in ubi_eba_write_leb()
1030 len, offset, vol_id, lnum, pnum); in ubi_eba_write_leb()
1036 err = recover_peb(ubi, pnum, vol_id, lnum, buf, in ubi_eba_write_leb()
1049 leb_write_unlock(ubi, vol_id, lnum); in ubi_eba_write_leb()
1058 vid_hdr->lnum = cpu_to_be32(lnum); in ubi_eba_write_leb()
1063 err = try_write_vid_and_data(vol, lnum, vidb, buf, offset, len); in ubi_eba_write_leb()
1083 leb_write_unlock(ubi, vol_id, lnum); in ubi_eba_write_leb()
1111 int lnum, const void *buf, int len, int used_ebs) in ubi_eba_write_leb_st() argument
1121 if (lnum == used_ebs - 1) in ubi_eba_write_leb_st()
1133 err = leb_write_lock(ubi, vol_id, lnum); in ubi_eba_write_leb_st()
1139 vid_hdr->lnum = cpu_to_be32(lnum); in ubi_eba_write_leb_st()
1149 ubi_assert(vol->eba_tbl->entries[lnum].pnum < 0); in ubi_eba_write_leb_st()
1152 err = try_write_vid_and_data(vol, lnum, vidb, buf, 0, len); in ubi_eba_write_leb_st()
1163 leb_write_unlock(ubi, vol_id, lnum); in ubi_eba_write_leb_st()
1189 int lnum, const void *buf, int len) in ubi_eba_atomic_leb_change() argument
1204 err = ubi_eba_unmap_leb(ubi, vol, lnum); in ubi_eba_atomic_leb_change()
1207 return ubi_eba_write_leb(ubi, vol, lnum, NULL, 0, 0); in ubi_eba_atomic_leb_change()
1217 err = leb_write_lock(ubi, vol_id, lnum); in ubi_eba_atomic_leb_change()
1223 vid_hdr->lnum = cpu_to_be32(lnum); in ubi_eba_atomic_leb_change()
1233 dbg_eba("change LEB %d:%d", vol_id, lnum); in ubi_eba_atomic_leb_change()
1236 err = try_write_vid_and_data(vol, lnum, vidb, buf, 0, len); in ubi_eba_atomic_leb_change()
1252 leb_write_unlock(ubi, vol_id, lnum); in ubi_eba_atomic_leb_change()
1304 int err, vol_id, lnum, data_size, aldata_size, idx; in ubi_eba_copy_leb() local
1312 lnum = be32_to_cpu(vid_hdr->lnum); in ubi_eba_copy_leb()
1314 dbg_wl("copy LEB %d:%d, PEB %d to PEB %d", vol_id, lnum, from, to); in ubi_eba_copy_leb()
1354 err = leb_write_trylock(ubi, vol_id, lnum); in ubi_eba_copy_leb()
1356 dbg_wl("contention on LEB %d:%d, cancel", vol_id, lnum); in ubi_eba_copy_leb()
1365 if (vol->eba_tbl->entries[lnum].pnum != from) { in ubi_eba_copy_leb()
1367 vol_id, lnum, from, vol->eba_tbl->entries[lnum].pnum); in ubi_eba_copy_leb()
1452 ubi_assert(vol->eba_tbl->entries[lnum].pnum == from); in ubi_eba_copy_leb()
1453 vol->eba_tbl->entries[lnum].pnum = to; in ubi_eba_copy_leb()
1458 leb_write_unlock(ubi, vol_id, lnum); in ubi_eba_copy_leb()
1565 scan_eba[i][aeb->lnum] = aeb->pnum; in self_check_eba()
1572 fm_eba[i][aeb->lnum] = aeb->pnum; in self_check_eba()
1649 if (aeb->lnum >= vol->reserved_pebs) { in ubi_eba_init()
1658 entry = &vol->eba_tbl->entries[aeb->lnum]; in ubi_eba_init()
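
All of the references above appear to come from the UBI eraseblock-association code (drivers/mtd/ubi/eba.c): lnum is a logical eraseblock number, used to index the per-volume EBA table (vol->eba_tbl->entries[lnum].pnum), to key the per-LEB lock tree (ltree_lookup()/ltree_add_entry() and the leb_read/write_lock() helpers), and to fill the lnum field of the VID header on the write paths. As a quick orientation, below is a minimal user-space sketch of the table-indexing pattern those hits revolve around. It is illustrative only: struct eba_entry, struct eba_table, struct volume, leb_is_mapped() and leb_unmap() are simplified stand-ins, and the UBI_LEB_UNMAPPED value is assumed here rather than taken from the kernel headers.

/*
 * Minimal sketch of the LEB -> PEB mapping pattern seen in the hits above
 * (vol->eba_tbl->entries[lnum].pnum).  Simplified, user-space, not the
 * kernel's definitions.
 */
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>

#define UBI_LEB_UNMAPPED -1	/* assumed value: "no PEB assigned" */

struct eba_entry { int pnum; };		/* physical eraseblock number */
struct eba_table { struct eba_entry *entries; };
struct volume {
	int reserved_pebs;		/* number of LEBs in this volume */
	struct eba_table *eba_tbl;	/* indexed by lnum */
};

/* Mirrors ubi_eba_is_mapped(): a LEB is mapped iff its pnum is >= 0. */
static bool leb_is_mapped(const struct volume *vol, int lnum)
{
	return vol->eba_tbl->entries[lnum].pnum >= 0;
}

/* Mirrors the unmap path: drop the PEB so a later write picks a new one. */
static void leb_unmap(struct volume *vol, int lnum)
{
	vol->eba_tbl->entries[lnum].pnum = UBI_LEB_UNMAPPED;
}

int main(void)
{
	struct eba_table tbl;
	struct volume vol = { .reserved_pebs = 4, .eba_tbl = &tbl };
	int lnum;

	tbl.entries = malloc(sizeof(*tbl.entries) * vol.reserved_pebs);
	if (!tbl.entries)
		return 1;

	/* Start fully unmapped, then map LEB 2 to PEB 137 as an example. */
	for (lnum = 0; lnum < vol.reserved_pebs; lnum++)
		tbl.entries[lnum].pnum = UBI_LEB_UNMAPPED;
	tbl.entries[2].pnum = 137;

	for (lnum = 0; lnum < vol.reserved_pebs; lnum++)
		printf("LEB %d -> %s (pnum %d)\n", lnum,
		       leb_is_mapped(&vol, lnum) ? "mapped" : "unmapped",
		       tbl.entries[lnum].pnum);

	leb_unmap(&vol, 2);
	printf("after unmap: LEB 2 mapped? %d\n", leb_is_mapped(&vol, 2));
	free(tbl.entries);
	return 0;
}

In the real code every such table access sits under the leb_read_lock()/leb_write_lock() calls visible above, so concurrent readers, writers and the wear-leveling copy in ubi_eba_copy_leb() are serialized per LEB and never observe a half-updated mapping.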