Lines matching references to: t

58 static inline sector_t *get_node(struct dm_table *t,  in get_node()  argument
61 return t->index[l] + (n * KEYS_PER_NODE); in get_node()
68 static sector_t high(struct dm_table *t, unsigned int l, unsigned int n) in high() argument
70 for (; l < t->depth - 1; l++) in high()
73 if (n >= t->counts[l]) in high()
76 return get_node(t, l, n)[KEYS_PER_NODE - 1]; in high()
83 static int setup_btree_index(unsigned int l, struct dm_table *t) in setup_btree_index() argument
88 for (n = 0U; n < t->counts[l]; n++) { in setup_btree_index()
89 node = get_node(t, l, n); in setup_btree_index()
92 node[k] = high(t, l + 1, get_child(n, k)); in setup_btree_index()
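
The three helpers above form the table's lookup index: t->index[l] is a flat array of nodes for level l, each node holds KEYS_PER_NODE sorted sector keys, high() reports the highest key reachable through a child subtree, and setup_btree_index() copies those highs up into the parent level. A minimal userspace sketch of that layout; the constants and the simplified get_node() signature here are illustrative (the kernel sizes a node from the cache-line size):

#include <stdio.h>

/* Hypothetical sizing; the kernel fits one node into a cache line. */
#define KEYS_PER_NODE     4
#define CHILDREN_PER_NODE (KEYS_PER_NODE + 1)

typedef unsigned long long sector_t;

/* Node n of one level stored as a flat array, as in get_node(). */
static sector_t *get_node(sector_t *level, unsigned int n)
{
        return level + (n * KEYS_PER_NODE);
}

int main(void)
{
        /* Leaf level: two nodes of 4 keys (highest sector of 8 targets). */
        sector_t leaves[2 * KEYS_PER_NODE] = {  99, 199, 299, 399,
                                               499, 599, 699, 799 };
        sector_t parent[KEYS_PER_NODE] = { 0 };

        /* As in setup_btree_index(): a parent key is the highest key
         * ("high") reachable through the corresponding child node. */
        for (unsigned int k = 0; k < 2; k++)
                parent[k] = get_node(leaves, k)[KEYS_PER_NODE - 1];

        printf("parent keys: %llu %llu\n", parent[0], parent[1]);  /* 399 799 */
        return 0;
}
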
102 static int alloc_targets(struct dm_table *t, unsigned int num) in alloc_targets() argument
118 kvfree(t->highs); in alloc_targets()
120 t->num_allocated = num; in alloc_targets()
121 t->highs = n_highs; in alloc_targets()
122 t->targets = n_targets; in alloc_targets()
130 struct dm_table *t = kzalloc(sizeof(*t), GFP_KERNEL); in dm_table_create() local
132 if (!t) in dm_table_create()
135 INIT_LIST_HEAD(&t->devices); in dm_table_create()
143 kfree(t); in dm_table_create()
147 if (alloc_targets(t, num_targets)) { in dm_table_create()
148 kfree(t); in dm_table_create()
152 t->type = DM_TYPE_NONE; in dm_table_create()
153 t->mode = mode; in dm_table_create()
154 t->md = md; in dm_table_create()
155 *result = t; in dm_table_create()
173 static void dm_table_destroy_crypto_profile(struct dm_table *t);
175 void dm_table_destroy(struct dm_table *t) in dm_table_destroy() argument
179 if (!t) in dm_table_destroy()
183 if (t->depth >= 2) in dm_table_destroy()
184 kvfree(t->index[t->depth - 2]); in dm_table_destroy()
187 for (i = 0; i < t->num_targets; i++) { in dm_table_destroy()
188 struct dm_target *tgt = t->targets + i; in dm_table_destroy()
196 kvfree(t->highs); in dm_table_destroy()
199 free_devices(&t->devices, t->md); in dm_table_destroy()
201 dm_free_md_mempools(t->mempools); in dm_table_destroy()
203 dm_table_destroy_crypto_profile(t); in dm_table_destroy()
205 kfree(t); in dm_table_destroy()
355 struct dm_table *t = ti->table; in dm_get_device() local
357 BUG_ON(!t); in dm_get_device()
370 dd = find_device(&t->devices, dev); in dm_get_device()
376 if ((r = dm_get_table_device(t->md, dev, mode, &dd->dm_dev))) { in dm_get_device()
382 list_add(&dd->list, &t->devices); in dm_get_device()
386 r = upgrade_mode(dd, mode, t->md); in dm_get_device()
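
dm_get_device() follows a find-or-add pattern over t->devices: look the device up in the table's list, open it via dm_get_table_device() and link a new entry only if it is absent, and merely upgrade the mode of an entry that already exists. A simplified userspace sketch of that pattern (the struct, field names, and mode handling here are illustrative, not the kernel's):

#include <stdlib.h>
#include <string.h>

struct dev_entry {
        char name[32];
        int mode;
        struct dev_entry *next;
};

/* Return the existing entry for 'name', or add one; NULL on allocation failure. */
static struct dev_entry *find_or_add(struct dev_entry **head, const char *name, int mode)
{
        struct dev_entry *dd;

        for (dd = *head; dd; dd = dd->next)
                if (!strcmp(dd->name, name)) {
                        dd->mode |= mode;       /* roughly what upgrade_mode() does */
                        return dd;
                }

        dd = calloc(1, sizeof(*dd));
        if (!dd)
                return NULL;
        strncpy(dd->name, name, sizeof(dd->name) - 1);
        dd->mode = mode;
        dd->next = *head;                       /* like list_add(&dd->list, &t->devices) */
        *head = dd;
        return dd;
}
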
636 int dm_table_add_target(struct dm_table *t, const char *type, in dm_table_add_target() argument
643 if (t->singleton) { in dm_table_add_target()
645 dm_device_name(t->md), t->targets->type->name); in dm_table_add_target()
649 BUG_ON(t->num_targets >= t->num_allocated); in dm_table_add_target()
651 tgt = t->targets + t->num_targets; in dm_table_add_target()
655 DMERR("%s: zero-length target", dm_device_name(t->md)); in dm_table_add_target()
661 DMERR("%s: %s: unknown target type", dm_device_name(t->md), type); in dm_table_add_target()
666 if (t->num_targets) { in dm_table_add_target()
670 t->singleton = true; in dm_table_add_target()
673 if (dm_target_always_writeable(tgt->type) && !(t->mode & FMODE_WRITE)) { in dm_table_add_target()
678 if (t->immutable_target_type) { in dm_table_add_target()
679 if (t->immutable_target_type != tgt->type) { in dm_table_add_target()
684 if (t->num_targets) { in dm_table_add_target()
688 t->immutable_target_type = tgt->type; in dm_table_add_target()
692 t->integrity_added = 1; in dm_table_add_target()
694 tgt->table = t; in dm_table_add_target()
702 if (!adjoin(t, tgt)) { in dm_table_add_target()
718 t->highs[t->num_targets++] = tgt->begin + tgt->len - 1; in dm_table_add_target()
722 dm_device_name(t->md), type); in dm_table_add_target()
727 DMERR("%s: %s: %s (%pe)", dm_device_name(t->md), type, tgt->error, ERR_PTR(r)); in dm_table_add_target()
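
Each successful dm_table_add_target() call records one boundary key: for a target mapping sectors [begin, begin + len), t->highs[] receives begin + len - 1, the target's last sector. With illustrative numbers, a 1000-sector target at offset 0 followed by a 3096-sector target yields highs = {999, 4095}, and dm_table_get_size() later reports 4095 + 1 = 4096 sectors.
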
802 void dm_table_set_type(struct dm_table *t, enum dm_queue_mode type) in dm_table_set_type() argument
804 t->type = type; in dm_table_set_type()
824 bool dm_table_supports_dax(struct dm_table *t, in dm_table_supports_dax() argument
831 for (i = 0; i < dm_table_get_num_targets(t); i++) { in dm_table_supports_dax()
832 ti = dm_table_get_target(t, i); in dm_table_supports_dax()
858 static int dm_table_determine_type(struct dm_table *t) in dm_table_determine_type() argument
863 struct list_head *devices = dm_table_get_devices(t); in dm_table_determine_type()
864 enum dm_queue_mode live_md_type = dm_get_md_type(t->md); in dm_table_determine_type()
867 if (t->type != DM_TYPE_NONE) { in dm_table_determine_type()
869 if (t->type == DM_TYPE_BIO_BASED) { in dm_table_determine_type()
873 BUG_ON(t->type == DM_TYPE_DAX_BIO_BASED); in dm_table_determine_type()
877 for (i = 0; i < t->num_targets; i++) { in dm_table_determine_type()
878 tgt = t->targets + i; in dm_table_determine_type()
908 t->type = DM_TYPE_BIO_BASED; in dm_table_determine_type()
909 if (dm_table_supports_dax(t, device_not_dax_capable, &page_size) || in dm_table_determine_type()
911 t->type = DM_TYPE_DAX_BIO_BASED; in dm_table_determine_type()
918 t->type = DM_TYPE_REQUEST_BASED; in dm_table_determine_type()
927 if (t->num_targets > 1) { in dm_table_determine_type()
934 struct dm_table *live_table = dm_get_live_table(t->md, &srcu_idx); in dm_table_determine_type()
938 t->type = live_table->type; in dm_table_determine_type()
939 dm_put_live_table(t->md, srcu_idx); in dm_table_determine_type()
943 tgt = dm_table_get_immutable_target(t); in dm_table_determine_type()
962 enum dm_queue_mode dm_table_get_type(struct dm_table *t) in dm_table_get_type() argument
964 return t->type; in dm_table_get_type()
967 struct target_type *dm_table_get_immutable_target_type(struct dm_table *t) in dm_table_get_immutable_target_type() argument
969 return t->immutable_target_type; in dm_table_get_immutable_target_type()
972 struct dm_target *dm_table_get_immutable_target(struct dm_table *t) in dm_table_get_immutable_target() argument
975 if (t->num_targets > 1 || in dm_table_get_immutable_target()
976 !dm_target_is_immutable(t->targets[0].type)) in dm_table_get_immutable_target()
979 return t->targets; in dm_table_get_immutable_target()
982 struct dm_target *dm_table_get_wildcard_target(struct dm_table *t) in dm_table_get_wildcard_target() argument
987 for (i = 0; i < dm_table_get_num_targets(t); i++) { in dm_table_get_wildcard_target()
988 ti = dm_table_get_target(t, i); in dm_table_get_wildcard_target()
996 bool dm_table_bio_based(struct dm_table *t) in dm_table_bio_based() argument
998 return __table_type_bio_based(dm_table_get_type(t)); in dm_table_bio_based()
1001 bool dm_table_request_based(struct dm_table *t) in dm_table_request_based() argument
1003 return __table_type_request_based(dm_table_get_type(t)); in dm_table_request_based()
1006 static int dm_table_alloc_md_mempools(struct dm_table *t, struct mapped_device *md) in dm_table_alloc_md_mempools() argument
1008 enum dm_queue_mode type = dm_table_get_type(t); in dm_table_alloc_md_mempools()
1020 for (i = 0; i < t->num_targets; i++) { in dm_table_alloc_md_mempools()
1021 ti = t->targets + i; in dm_table_alloc_md_mempools()
1026 t->mempools = dm_alloc_md_mempools(md, type, t->integrity_supported, in dm_table_alloc_md_mempools()
1028 if (!t->mempools) in dm_table_alloc_md_mempools()
1034 void dm_table_free_md_mempools(struct dm_table *t) in dm_table_free_md_mempools() argument
1036 dm_free_md_mempools(t->mempools); in dm_table_free_md_mempools()
1037 t->mempools = NULL; in dm_table_free_md_mempools()
1040 struct dm_md_mempools *dm_table_get_md_mempools(struct dm_table *t) in dm_table_get_md_mempools() argument
1042 return t->mempools; in dm_table_get_md_mempools()
1045 static int setup_indexes(struct dm_table *t) in setup_indexes() argument
1052 for (i = t->depth - 2; i >= 0; i--) { in setup_indexes()
1053 t->counts[i] = dm_div_up(t->counts[i + 1], CHILDREN_PER_NODE); in setup_indexes()
1054 total += t->counts[i]; in setup_indexes()
1062 for (i = t->depth - 2; i >= 0; i--) { in setup_indexes()
1063 t->index[i] = indexes; in setup_indexes()
1064 indexes += (KEYS_PER_NODE * t->counts[i]); in setup_indexes()
1065 setup_btree_index(i, t); in setup_indexes()
1074 static int dm_table_build_index(struct dm_table *t) in dm_table_build_index() argument
1080 leaf_nodes = dm_div_up(t->num_targets, KEYS_PER_NODE); in dm_table_build_index()
1081 t->depth = 1 + int_log(leaf_nodes, CHILDREN_PER_NODE); in dm_table_build_index()
1084 t->counts[t->depth - 1] = leaf_nodes; in dm_table_build_index()
1085 t->index[t->depth - 1] = t->highs; in dm_table_build_index()
1087 if (t->depth >= 2) in dm_table_build_index()
1088 r = setup_indexes(t); in dm_table_build_index()
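
dm_table_build_index() sizes the tree from the target count: leaf_nodes is the target count divided by KEYS_PER_NODE rounded up, and depth is 1 plus the number of fan-in steps needed to reduce the leaves to a single root. A runnable sketch of that arithmetic, re-implementing dm_div_up()/int_log() as read from the source and assuming KEYS_PER_NODE = 8 (the real value is derived from the cache-line size):

#include <stdio.h>

#define KEYS_PER_NODE     8                     /* assumed, see lead-in */
#define CHILDREN_PER_NODE (KEYS_PER_NODE + 1)

static unsigned int div_up(unsigned int n, unsigned int d)
{
        return (n + d - 1) / d;
}

/* Number of rounded-up divisions by 'base' needed to reduce n to 1. */
static unsigned int int_log(unsigned int n, unsigned int base)
{
        unsigned int result = 0;

        while (n > 1) {
                n = div_up(n, base);
                result++;
        }
        return result;
}

int main(void)
{
        unsigned int num_targets = 100;
        unsigned int leaf_nodes = div_up(num_targets, KEYS_PER_NODE);    /* 13 */
        unsigned int depth = 1 + int_log(leaf_nodes, CHILDREN_PER_NODE); /* 3  */

        printf("targets=%u leaves=%u depth=%u\n", num_targets, leaf_nodes, depth);
        return 0;
}
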
1102 static struct gendisk * dm_table_get_integrity_disk(struct dm_table *t) in dm_table_get_integrity_disk() argument
1104 struct list_head *devices = dm_table_get_devices(t); in dm_table_get_integrity_disk()
1109 for (i = 0; i < dm_table_get_num_targets(t); i++) { in dm_table_get_integrity_disk()
1110 struct dm_target *ti = dm_table_get_target(t, i); in dm_table_get_integrity_disk()
1130 dm_device_name(t->md), in dm_table_get_integrity_disk()
1146 static int dm_table_register_integrity(struct dm_table *t) in dm_table_register_integrity() argument
1148 struct mapped_device *md = t->md; in dm_table_register_integrity()
1152 if (t->integrity_added) in dm_table_register_integrity()
1155 template_disk = dm_table_get_integrity_disk(t); in dm_table_register_integrity()
1160 t->integrity_supported = true; in dm_table_register_integrity()
1177 dm_device_name(t->md), in dm_table_register_integrity()
1183 t->integrity_supported = true; in dm_table_register_integrity()
1222 struct dm_table *t; in dm_keyslot_evict() local
1227 t = dm_get_live_table(md, &srcu_idx); in dm_keyslot_evict()
1228 if (!t) in dm_keyslot_evict()
1230 for (i = 0; i < dm_table_get_num_targets(t); i++) { in dm_keyslot_evict()
1231 ti = dm_table_get_target(t, i); in dm_keyslot_evict()
1265 static void dm_table_destroy_crypto_profile(struct dm_table *t) in dm_table_destroy_crypto_profile() argument
1267 dm_destroy_crypto_profile(t->crypto_profile); in dm_table_destroy_crypto_profile()
1268 t->crypto_profile = NULL; in dm_table_destroy_crypto_profile()
1280 static int dm_table_construct_crypto_profile(struct dm_table *t) in dm_table_construct_crypto_profile() argument
1291 dmcp->md = t->md; in dm_table_construct_crypto_profile()
1300 for (i = 0; i < dm_table_get_num_targets(t); i++) { in dm_table_construct_crypto_profile()
1301 ti = dm_table_get_target(t, i); in dm_table_construct_crypto_profile()
1314 if (t->md->queue && in dm_table_construct_crypto_profile()
1316 t->md->queue->crypto_profile)) { in dm_table_construct_crypto_profile()
1343 t->crypto_profile = profile; in dm_table_construct_crypto_profile()
1349 struct dm_table *t) in dm_update_crypto_profile() argument
1351 if (!t->crypto_profile) in dm_update_crypto_profile()
1356 blk_crypto_register(t->crypto_profile, q); in dm_update_crypto_profile()
1359 t->crypto_profile); in dm_update_crypto_profile()
1360 dm_destroy_crypto_profile(t->crypto_profile); in dm_update_crypto_profile()
1362 t->crypto_profile = NULL; in dm_update_crypto_profile()
1367 static int dm_table_construct_crypto_profile(struct dm_table *t) in dm_table_construct_crypto_profile() argument
1376 static void dm_table_destroy_crypto_profile(struct dm_table *t) in dm_table_destroy_crypto_profile() argument
1381 struct dm_table *t) in dm_update_crypto_profile() argument
1391 int dm_table_complete(struct dm_table *t) in dm_table_complete() argument
1395 r = dm_table_determine_type(t); in dm_table_complete()
1401 r = dm_table_build_index(t); in dm_table_complete()
1407 r = dm_table_register_integrity(t); in dm_table_complete()
1413 r = dm_table_construct_crypto_profile(t); in dm_table_complete()
1419 r = dm_table_alloc_md_mempools(t, t->md); in dm_table_complete()
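
Taken together, the calls in this listing imply the usual table lifecycle: dm_table_create(), one or more dm_table_add_target() calls, dm_table_complete() (which determines the type, builds the index, registers integrity and crypto, and allocates mempools), and dm_table_destroy() on failure or teardown. A sketch of that sequence; it is not buildable outside the kernel, the target parameters are purely illustrative, and the final swap to the live table is omitted:

/* Sketch only: error handling abbreviated, live-table bind not shown. */
static int example_load_table(struct mapped_device *md, fmode_t mode)
{
        char params[] = "/dev/sda1 0";  /* "<dev> <offset>" for a linear target */
        struct dm_table *t;
        int r;

        r = dm_table_create(&t, mode, 1 /* expected target count */, md);
        if (r)
                return r;

        /* One hypothetical "linear" target covering sectors 0..1023. */
        r = dm_table_add_target(t, "linear", 0, 1024, params);
        if (!r)
                r = dm_table_complete(t);

        if (r)
                dm_table_destroy(t);
        return r;
}
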
1427 void dm_table_event_callback(struct dm_table *t, in dm_table_event_callback() argument
1431 t->event_fn = fn; in dm_table_event_callback()
1432 t->event_context = context; in dm_table_event_callback()
1436 void dm_table_event(struct dm_table *t) in dm_table_event() argument
1439 if (t->event_fn) in dm_table_event()
1440 t->event_fn(t->event_context); in dm_table_event()
1445 inline sector_t dm_table_get_size(struct dm_table *t) in dm_table_get_size() argument
1447 return t->num_targets ? (t->highs[t->num_targets - 1] + 1) : 0; in dm_table_get_size()
1451 struct dm_target *dm_table_get_target(struct dm_table *t, unsigned int index) in dm_table_get_target() argument
1453 if (index >= t->num_targets) in dm_table_get_target()
1456 return t->targets + index; in dm_table_get_target()
1465 struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector) in dm_table_find_target() argument
1470 if (unlikely(sector >= dm_table_get_size(t))) in dm_table_find_target()
1473 for (l = 0; l < t->depth; l++) { in dm_table_find_target()
1475 node = get_node(t, l, n); in dm_table_find_target()
1482 return &t->targets[(KEYS_PER_NODE * n) + k]; in dm_table_find_target()
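
dm_table_find_target() walks the index one level at a time: in the current node it scans for the first key greater than or equal to the sector, descends into the corresponding child, and the final leaf-level position indexes t->targets[] directly. A runnable single-level sketch of that scan over the leaf keys (the kernel repeats it for every level of the tree; the values below are illustrative):

#include <stdio.h>

typedef unsigned long long sector_t;

/* Leaf keys: last sector of each target, as stored in t->highs[]. */
static const sector_t highs[] = { 999, 4095, 8191 };

/* Return the index of the target whose range contains 'sector', or -1. */
static int find_target(sector_t sector)
{
        for (unsigned int k = 0; k < sizeof(highs) / sizeof(highs[0]); k++)
                if (highs[k] >= sector)        /* first boundary at or past sector */
                        return k;
        return -1;                             /* past the end of the device */
}

int main(void)
{
        printf("sector 4000 -> target %d\n", find_target(4000));  /* 1  */
        printf("sector 9000 -> target %d\n", find_target(9000));  /* -1 */
        return 0;
}
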
1508 static bool dm_table_any_dev_attr(struct dm_table *t, in dm_table_any_dev_attr() argument
1514 for (i = 0; i < dm_table_get_num_targets(t); i++) { in dm_table_any_dev_attr()
1515 ti = dm_table_get_target(t, i); in dm_table_any_dev_attr()
1577 static bool dm_table_supports_zoned_model(struct dm_table *t, in dm_table_supports_zoned_model() argument
1583 for (i = 0; i < dm_table_get_num_targets(t); i++) { in dm_table_supports_zoned_model()
1584 ti = dm_table_get_target(t, i); in dm_table_supports_zoned_model()
1732 static void dm_table_verify_integrity(struct dm_table *t) in dm_table_verify_integrity() argument
1736 if (t->integrity_added) in dm_table_verify_integrity()
1739 if (t->integrity_supported) { in dm_table_verify_integrity()
1744 template_disk = dm_table_get_integrity_disk(t); in dm_table_verify_integrity()
1746 blk_integrity_compare(dm_disk(t->md), template_disk) >= 0) in dm_table_verify_integrity()
1750 if (integrity_profile_exists(dm_disk(t->md))) { in dm_table_verify_integrity()
1752 dm_device_name(t->md)); in dm_table_verify_integrity()
1753 blk_integrity_unregister(dm_disk(t->md)); in dm_table_verify_integrity()
1766 static bool dm_table_supports_flush(struct dm_table *t, unsigned long flush) in dm_table_supports_flush() argument
1777 for (i = 0; i < dm_table_get_num_targets(t); i++) { in dm_table_supports_flush()
1778 ti = dm_table_get_target(t, i); in dm_table_supports_flush()
1832 static bool dm_table_supports_write_same(struct dm_table *t) in dm_table_supports_write_same() argument
1837 for (i = 0; i < dm_table_get_num_targets(t); i++) { in dm_table_supports_write_same()
1838 ti = dm_table_get_target(t, i); in dm_table_supports_write_same()
1859 static bool dm_table_supports_write_zeroes(struct dm_table *t) in dm_table_supports_write_zeroes() argument
1864 while (i < dm_table_get_num_targets(t)) { in dm_table_supports_write_zeroes()
1865 ti = dm_table_get_target(t, i++); in dm_table_supports_write_zeroes()
1886 static bool dm_table_supports_nowait(struct dm_table *t) in dm_table_supports_nowait() argument
1891 while (i < dm_table_get_num_targets(t)) { in dm_table_supports_nowait()
1892 ti = dm_table_get_target(t, i++); in dm_table_supports_nowait()
1913 static bool dm_table_supports_discards(struct dm_table *t) in dm_table_supports_discards() argument
1918 for (i = 0; i < dm_table_get_num_targets(t); i++) { in dm_table_supports_discards()
1919 ti = dm_table_get_target(t, i); in dm_table_supports_discards()
1947 static bool dm_table_supports_secure_erase(struct dm_table *t) in dm_table_supports_secure_erase() argument
1952 for (i = 0; i < dm_table_get_num_targets(t); i++) { in dm_table_supports_secure_erase()
1953 ti = dm_table_get_target(t, i); in dm_table_supports_secure_erase()
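
The dm_table_supports_*() helpers above all follow one pattern: iterate every target and require each one (and, through its iterate_devices hook, every underlying device) to support the feature; a single holdout clears the capability for the whole table when dm_table_set_restrictions() configures the queue. A simplified userspace sketch of that all-or-nothing check (the struct and callback are stand-ins, not kernel types):

#include <stdbool.h>
#include <stddef.h>

struct target {
        bool (*dev_supports)(const struct target *tgt);  /* per-device probe stand-in */
};

/* The feature is advertised only if every target (and its devices) supports it. */
bool table_supports(const struct target *targets, size_t num_targets)
{
        for (size_t i = 0; i < num_targets; i++) {
                const struct target *ti = &targets[i];

                if (!ti->dev_supports || !ti->dev_supports(ti))
                        return false;   /* one holdout disables it table-wide */
        }
        return true;
}
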
1975 int dm_table_set_restrictions(struct dm_table *t, struct request_queue *q, in dm_table_set_restrictions() argument
1987 if (dm_table_supports_nowait(t)) in dm_table_set_restrictions()
1992 if (!dm_table_supports_discards(t)) { in dm_table_set_restrictions()
2003 if (dm_table_supports_secure_erase(t)) in dm_table_set_restrictions()
2006 if (dm_table_supports_flush(t, (1UL << QUEUE_FLAG_WC))) { in dm_table_set_restrictions()
2008 if (dm_table_supports_flush(t, (1UL << QUEUE_FLAG_FUA))) in dm_table_set_restrictions()
2013 if (dm_table_supports_dax(t, device_not_dax_capable, &page_size)) { in dm_table_set_restrictions()
2015 if (dm_table_supports_dax(t, device_not_dax_synchronous_capable, NULL)) in dm_table_set_restrictions()
2016 set_dax_synchronous(t->md->dax_dev); in dm_table_set_restrictions()
2021 if (dm_table_any_dev_attr(t, device_dax_write_cache_enabled, NULL)) in dm_table_set_restrictions()
2022 dax_write_cache(t->md->dax_dev, true); in dm_table_set_restrictions()
2025 if (dm_table_any_dev_attr(t, device_is_rotational, NULL)) in dm_table_set_restrictions()
2030 if (!dm_table_supports_write_same(t)) in dm_table_set_restrictions()
2032 if (!dm_table_supports_write_zeroes(t)) in dm_table_set_restrictions()
2035 dm_table_verify_integrity(t); in dm_table_set_restrictions()
2044 if (dm_table_any_dev_attr(t, device_requires_stable_pages, NULL)) in dm_table_set_restrictions()
2056 dm_table_any_dev_attr(t, device_is_not_random, NULL)) in dm_table_set_restrictions()
2064 r = dm_set_zones_restrictions(t, q); in dm_table_set_restrictions()
2069 dm_update_crypto_profile(q, t); in dm_table_set_restrictions()
2070 disk_update_readahead(t->md->disk); in dm_table_set_restrictions()
2075 unsigned int dm_table_get_num_targets(struct dm_table *t) in dm_table_get_num_targets() argument
2077 return t->num_targets; in dm_table_get_num_targets()
2080 struct list_head *dm_table_get_devices(struct dm_table *t) in dm_table_get_devices() argument
2082 return &t->devices; in dm_table_get_devices()
2085 fmode_t dm_table_get_mode(struct dm_table *t) in dm_table_get_mode() argument
2087 return t->mode; in dm_table_get_mode()
2097 static void suspend_targets(struct dm_table *t, enum suspend_mode mode) in suspend_targets() argument
2099 int i = t->num_targets; in suspend_targets()
2100 struct dm_target *ti = t->targets; in suspend_targets()
2102 lockdep_assert_held(&t->md->suspend_lock); in suspend_targets()
2123 void dm_table_presuspend_targets(struct dm_table *t) in dm_table_presuspend_targets() argument
2125 if (!t) in dm_table_presuspend_targets()
2128 suspend_targets(t, PRESUSPEND); in dm_table_presuspend_targets()
2131 void dm_table_presuspend_undo_targets(struct dm_table *t) in dm_table_presuspend_undo_targets() argument
2133 if (!t) in dm_table_presuspend_undo_targets()
2136 suspend_targets(t, PRESUSPEND_UNDO); in dm_table_presuspend_undo_targets()
2139 void dm_table_postsuspend_targets(struct dm_table *t) in dm_table_postsuspend_targets() argument
2141 if (!t) in dm_table_postsuspend_targets()
2144 suspend_targets(t, POSTSUSPEND); in dm_table_postsuspend_targets()
2147 int dm_table_resume_targets(struct dm_table *t) in dm_table_resume_targets() argument
2151 lockdep_assert_held(&t->md->suspend_lock); in dm_table_resume_targets()
2153 for (i = 0; i < t->num_targets; i++) { in dm_table_resume_targets()
2154 struct dm_target *ti = t->targets + i; in dm_table_resume_targets()
2162 dm_device_name(t->md), ti->type->name, r); in dm_table_resume_targets()
2167 for (i = 0; i < t->num_targets; i++) { in dm_table_resume_targets()
2168 struct dm_target *ti = t->targets + i; in dm_table_resume_targets()
2177 struct mapped_device *dm_table_get_md(struct dm_table *t) in dm_table_get_md() argument
2179 return t->md; in dm_table_get_md()
2183 const char *dm_table_device_name(struct dm_table *t) in dm_table_device_name() argument
2185 return dm_device_name(t->md); in dm_table_device_name()
2189 void dm_table_run_md_queue_async(struct dm_table *t) in dm_table_run_md_queue_async() argument
2191 if (!dm_table_request_based(t)) in dm_table_run_md_queue_async()
2194 if (t->md->queue) in dm_table_run_md_queue_async()
2195 blk_mq_run_hw_queues(t->md->queue, true); in dm_table_run_md_queue_async()