Lines matching refs: ns

53 bool nvme_mpath_set_disk_name(struct nvme_ns *ns, char *disk_name, int *flags)  in nvme_mpath_set_disk_name()  argument
57 if (!ns->head->disk) { in nvme_mpath_set_disk_name()
58 sprintf(disk_name, "nvme%dn%d", ns->ctrl->subsys->instance, in nvme_mpath_set_disk_name()
59 ns->head->instance); in nvme_mpath_set_disk_name()
62 sprintf(disk_name, "nvme%dc%dn%d", ns->ctrl->subsys->instance, in nvme_mpath_set_disk_name()
63 ns->ctrl->instance, ns->head->instance); in nvme_mpath_set_disk_name()
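
These lines show the two naming forms used by nvme_mpath_set_disk_name(): when the namespace head has no multipath disk, the namespace itself is named nvme<subsys>n<head>; otherwise the per-controller path gets the longer nvme<subsys>c<ctrl>n<head> form. Below is a minimal userspace sketch of just the formatting, with made-up instance numbers; it is not the kernel function.

/* Sketch of the multipath naming scheme with illustrative instance numbers. */
#include <stdio.h>

int main(void)
{
	char head_name[32], path_name[32];
	int subsys_instance = 0;	/* ns->ctrl->subsys->instance */
	int ctrl_instance = 1;		/* ns->ctrl->instance */
	int head_instance = 2;		/* ns->head->instance */

	/* No multipath head disk: the namespace itself takes the short form. */
	snprintf(head_name, sizeof(head_name), "nvme%dn%d",
		 subsys_instance, head_instance);
	/* Head disk present: the per-controller path gets a "c<ctrl>" part. */
	snprintf(path_name, sizeof(path_name), "nvme%dc%dn%d",
		 subsys_instance, ctrl_instance, head_instance);

	printf("%s %s\n", head_name, path_name);	/* nvme0n2 nvme0c1n2 */
	return 0;
}
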
70 struct nvme_ns *ns = req->q->queuedata; in nvme_failover_req() local
75 nvme_mpath_clear_current_path(ns); in nvme_failover_req()
82 if (nvme_is_ana_error(status) && ns->ctrl->ana_log_buf) { in nvme_failover_req()
83 set_bit(NVME_NS_ANA_PENDING, &ns->flags); in nvme_failover_req()
84 queue_work(nvme_wq, &ns->ctrl->ana_work); in nvme_failover_req()
87 spin_lock_irqsave(&ns->head->requeue_lock, flags); in nvme_failover_req()
89 bio_set_dev(bio, ns->head->disk->part0); in nvme_failover_req()
95 blk_steal_bios(&ns->head->requeue_list, req); in nvme_failover_req()
96 spin_unlock_irqrestore(&ns->head->requeue_lock, flags); in nvme_failover_req()
99 kblockd_schedule_work(&ns->head->requeue_work); in nvme_failover_req()
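
nvme_failover_req() clears the cached current path, marks the namespace NVME_NS_ANA_PENDING and queues ana_work on ANA errors, then moves the request's bios onto the head's requeue_list under requeue_lock before kicking requeue_work. Below is a minimal userspace sketch of that splice-under-a-lock-then-kick-a-worker pattern; the bio struct, the mutex and steal_bios() are illustrative stand-ins, not the kernel's blk_steal_bios() machinery.

/* Sketch of the failover requeue pattern: splice pending work onto a
 * per-head list under a lock, then schedule the requeue worker.
 * Types and names here are illustrative only. */
#include <pthread.h>
#include <stdio.h>

struct bio { struct bio *next; int id; };

struct ns_head {
	pthread_mutex_t requeue_lock;
	struct bio *requeue_list;	/* singly linked, LIFO for brevity */
};

static void steal_bios(struct ns_head *head, struct bio *from)
{
	pthread_mutex_lock(&head->requeue_lock);
	while (from) {
		struct bio *next = from->next;

		from->next = head->requeue_list;	/* retarget to the head */
		head->requeue_list = from;
		from = next;
	}
	pthread_mutex_unlock(&head->requeue_lock);
	/* kblockd_schedule_work(&head->requeue_work) would go here. */
}

int main(void)
{
	struct ns_head head = { PTHREAD_MUTEX_INITIALIZER, NULL };
	struct bio b2 = { NULL, 2 }, b1 = { &b2, 1 };

	steal_bios(&head, &b1);
	for (struct bio *b = head.requeue_list; b; b = b->next)
		printf("requeued bio %d\n", b->id);
	return 0;
}
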
104 struct nvme_ns *ns; in nvme_kick_requeue_lists() local
107 list_for_each_entry(ns, &ctrl->namespaces, list) { in nvme_kick_requeue_lists()
108 if (!ns->head->disk) in nvme_kick_requeue_lists()
110 kblockd_schedule_work(&ns->head->requeue_work); in nvme_kick_requeue_lists()
112 disk_uevent(ns->head->disk, KOBJ_CHANGE); in nvme_kick_requeue_lists()
126 bool nvme_mpath_clear_current_path(struct nvme_ns *ns) in nvme_mpath_clear_current_path() argument
128 struct nvme_ns_head *head = ns->head; in nvme_mpath_clear_current_path()
136 if (ns == rcu_access_pointer(head->current_path[node])) { in nvme_mpath_clear_current_path()
147 struct nvme_ns *ns; in nvme_mpath_clear_ctrl_paths() local
150 list_for_each_entry(ns, &ctrl->namespaces, list) { in nvme_mpath_clear_ctrl_paths()
151 nvme_mpath_clear_current_path(ns); in nvme_mpath_clear_ctrl_paths()
152 kblockd_schedule_work(&ns->head->requeue_work); in nvme_mpath_clear_ctrl_paths()
157 void nvme_mpath_revalidate_paths(struct nvme_ns *ns) in nvme_mpath_revalidate_paths() argument
159 struct nvme_ns_head *head = ns->head; in nvme_mpath_revalidate_paths()
163 list_for_each_entry_rcu(ns, &head->list, siblings) { in nvme_mpath_revalidate_paths()
164 if (capacity != get_capacity(ns->disk)) in nvme_mpath_revalidate_paths()
165 clear_bit(NVME_NS_READY, &ns->flags); in nvme_mpath_revalidate_paths()
172 static bool nvme_path_is_disabled(struct nvme_ns *ns) in nvme_path_is_disabled() argument
179 if (ns->ctrl->state != NVME_CTRL_LIVE && in nvme_path_is_disabled()
180 ns->ctrl->state != NVME_CTRL_DELETING) in nvme_path_is_disabled()
182 if (test_bit(NVME_NS_ANA_PENDING, &ns->flags) || in nvme_path_is_disabled()
183 !test_bit(NVME_NS_READY, &ns->flags)) in nvme_path_is_disabled()
191 struct nvme_ns *found = NULL, *fallback = NULL, *ns; in __nvme_find_path() local
193 list_for_each_entry_rcu(ns, &head->list, siblings) { in __nvme_find_path()
194 if (nvme_path_is_disabled(ns)) in __nvme_find_path()
198 distance = node_distance(node, ns->ctrl->numa_node); in __nvme_find_path()
202 switch (ns->ana_state) { in __nvme_find_path()
206 found = ns; in __nvme_find_path()
212 fallback = ns; in __nvme_find_path()
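
__nvme_find_path() iterates the sibling paths, skips disabled ones, and prefers the usable OPTIMIZED path with the smallest NUMA distance to the requesting node, remembering a NONOPTIMIZED path as fallback. A rough userspace sketch of that selection over a plain array follows (the kernel walks an RCU list and also caches the result); struct path and find_path() are illustrative names.

/* Sketch of NUMA-distance path selection with an optimized/non-optimized
 * fallback, over a plain array instead of the kernel's RCU sibling list. */
#include <limits.h>
#include <stdio.h>

enum ana_state { ANA_OPTIMIZED, ANA_NONOPTIMIZED, ANA_INACCESSIBLE };

struct path {
	int disabled;		/* nvme_path_is_disabled() result */
	int numa_distance;	/* node_distance(node, ctrl->numa_node) */
	enum ana_state ana_state;
};

static struct path *find_path(struct path *paths, int nr)
{
	struct path *found = NULL, *fallback = NULL;
	int found_dist = INT_MAX, fallback_dist = INT_MAX;

	for (int i = 0; i < nr; i++) {
		struct path *p = &paths[i];

		if (p->disabled)
			continue;
		if (p->ana_state == ANA_OPTIMIZED &&
		    p->numa_distance < found_dist) {
			found = p;
			found_dist = p->numa_distance;
		} else if (p->ana_state == ANA_NONOPTIMIZED &&
			   p->numa_distance < fallback_dist) {
			fallback = p;
			fallback_dist = p->numa_distance;
		}
	}
	return found ? found : fallback;
}

int main(void)
{
	struct path paths[] = {
		{ 0, 20, ANA_NONOPTIMIZED },
		{ 0, 10, ANA_OPTIMIZED },
		{ 1, 5,  ANA_OPTIMIZED },	/* disabled, must be skipped */
	};

	printf("picked path %td\n", find_path(paths, 3) - paths);	/* 1 */
	return 0;
}
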
228 struct nvme_ns *ns) in nvme_next_ns() argument
230 ns = list_next_or_null_rcu(&head->list, &ns->siblings, struct nvme_ns, in nvme_next_ns()
232 if (ns) in nvme_next_ns()
233 return ns; in nvme_next_ns()
240 struct nvme_ns *ns, *found = NULL; in nvme_round_robin_path() local
248 for (ns = nvme_next_ns(head, old); in nvme_round_robin_path()
249 ns && ns != old; in nvme_round_robin_path()
250 ns = nvme_next_ns(head, ns)) { in nvme_round_robin_path()
251 if (nvme_path_is_disabled(ns)) in nvme_round_robin_path()
254 if (ns->ana_state == NVME_ANA_OPTIMIZED) { in nvme_round_robin_path()
255 found = ns; in nvme_round_robin_path()
258 if (ns->ana_state == NVME_ANA_NONOPTIMIZED) in nvme_round_robin_path()
259 found = ns; in nvme_round_robin_path()
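
The round-robin policy resumes the walk after the previously used path, wraps around the sibling list via nvme_next_ns(), returns the first OPTIMIZED path it finds and keeps a NONOPTIMIZED path as fallback. A sketch with a modulo index standing in for the circular list walk; types and names are illustrative.

/* Sketch of round-robin path selection: resume after the previous path,
 * prefer the first OPTIMIZED path, fall back to a NONOPTIMIZED one. */
#include <stdio.h>

enum ana_state { ANA_OPTIMIZED, ANA_NONOPTIMIZED, ANA_INACCESSIBLE };

struct path { int disabled; enum ana_state ana_state; };

static int round_robin(struct path *paths, int nr, int old)
{
	int found = -1;

	for (int step = 1; step <= nr; step++) {
		int i = (old + step) % nr;	/* nvme_next_ns() equivalent */

		if (i == old)
			break;
		if (paths[i].disabled)
			continue;
		if (paths[i].ana_state == ANA_OPTIMIZED)
			return i;		/* best case: stop here */
		if (paths[i].ana_state == ANA_NONOPTIMIZED)
			found = i;		/* remember a usable fallback */
	}
	return found;
}

int main(void)
{
	struct path paths[] = {
		{ 0, ANA_NONOPTIMIZED },
		{ 0, ANA_OPTIMIZED },		/* previously used path */
		{ 0, ANA_OPTIMIZED },
	};

	printf("next path %d\n", round_robin(paths, 3, 1));	/* 2 */
	return 0;
}
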
280 static inline bool nvme_path_is_optimized(struct nvme_ns *ns) in nvme_path_is_optimized() argument
282 return ns->ctrl->state == NVME_CTRL_LIVE && in nvme_path_is_optimized()
283 ns->ana_state == NVME_ANA_OPTIMIZED; in nvme_path_is_optimized()
289 struct nvme_ns *ns; in nvme_find_path() local
291 ns = srcu_dereference(head->current_path[node], &head->srcu); in nvme_find_path()
292 if (unlikely(!ns)) in nvme_find_path()
296 return nvme_round_robin_path(head, node, ns); in nvme_find_path()
297 if (unlikely(!nvme_path_is_optimized(ns))) in nvme_find_path()
299 return ns; in nvme_find_path()
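
nvme_find_path() first dereferences the per-node cached current_path under SRCU and reuses it while it is live and optimized; otherwise (or when the round-robin iopolicy is selected) it falls back to a fresh search and caches the result. A userspace sketch of that cache-then-reselect pattern, with a plain pointer standing in for the SRCU-protected slot and find_path()/slow_search() as illustrative names:

/* Sketch of the nvme_find_path() fast path: reuse the cached per-node
 * path while it is still optimized, otherwise re-run the full search. */
#include <stdio.h>

enum ana_state { ANA_OPTIMIZED, ANA_NONOPTIMIZED, ANA_INACCESSIBLE };
struct path { enum ana_state ana_state; int live; };

static struct path *slow_search(struct path *paths, int nr)
{
	struct path *fallback = NULL;

	for (int i = 0; i < nr; i++) {
		if (!paths[i].live)
			continue;
		if (paths[i].ana_state == ANA_OPTIMIZED)
			return &paths[i];
		if (paths[i].ana_state == ANA_NONOPTIMIZED && !fallback)
			fallback = &paths[i];
	}
	return fallback;
}

static struct path *find_path(struct path **cached, struct path *paths, int nr)
{
	struct path *p = *cached;

	if (p && p->live && p->ana_state == ANA_OPTIMIZED)
		return p;		/* fast path: cached and still optimal */
	p = slow_search(paths, nr);	/* __nvme_find_path() equivalent */
	*cached = p;			/* kernel caches via rcu_assign_pointer */
	return p;
}

int main(void)
{
	struct path paths[] = { { ANA_NONOPTIMIZED, 1 }, { ANA_OPTIMIZED, 1 } };
	struct path *cached = NULL;

	printf("picked path %td\n", find_path(&cached, paths, 2) - paths); /* 1 */
	printf("picked path %td\n", find_path(&cached, paths, 2) - paths); /* 1, cached */
	return 0;
}
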
304 struct nvme_ns *ns; in nvme_available_path() local
306 list_for_each_entry_rcu(ns, &head->list, siblings) { in nvme_available_path()
307 if (test_bit(NVME_CTRL_FAILFAST_EXPIRED, &ns->ctrl->flags)) in nvme_available_path()
309 switch (ns->ctrl->state) { in nvme_available_path()
326 struct nvme_ns *ns; in nvme_ns_head_submit_bio() local
337 ns = nvme_find_path(head); in nvme_ns_head_submit_bio()
338 if (likely(ns)) { in nvme_ns_head_submit_bio()
339 bio_set_dev(bio, ns->disk->part0); in nvme_ns_head_submit_bio()
341 trace_block_bio_remap(bio, disk_devt(ns->head->disk), in nvme_ns_head_submit_bio()
377 struct nvme_ns *ns; in nvme_ns_head_report_zones() local
381 ns = nvme_find_path(head); in nvme_ns_head_report_zones()
382 if (ns) in nvme_ns_head_report_zones()
383 ret = nvme_ns_report_zones(ns, sector, nr_zones, cb, data); in nvme_ns_head_report_zones()
508 static void nvme_mpath_set_live(struct nvme_ns *ns) in nvme_mpath_set_live() argument
510 struct nvme_ns_head *head = ns->head; in nvme_mpath_set_live()
525 clear_bit(NVME_NSHEAD_DISK_LIVE, &ns->flags); in nvme_mpath_set_live()
532 if (nvme_path_is_optimized(ns)) { in nvme_mpath_set_live()
596 struct nvme_ns *ns) in nvme_update_ns_ana_state() argument
598 ns->ana_grpid = le32_to_cpu(desc->grpid); in nvme_update_ns_ana_state()
599 ns->ana_state = desc->state; in nvme_update_ns_ana_state()
600 clear_bit(NVME_NS_ANA_PENDING, &ns->flags); in nvme_update_ns_ana_state()
602 if (nvme_state_is_live(ns->ana_state)) in nvme_update_ns_ana_state()
603 nvme_mpath_set_live(ns); in nvme_update_ns_ana_state()
611 struct nvme_ns *ns; in nvme_update_ana_state() local
624 list_for_each_entry(ns, &ctrl->namespaces, list) { in nvme_update_ana_state()
628 if (ns->head->ns_id < nsid) in nvme_update_ana_state()
630 if (ns->head->ns_id == nsid) in nvme_update_ana_state()
631 nvme_update_ns_ana_state(desc, ns); in nvme_update_ana_state()
634 if (ns->head->ns_id > nsid) in nvme_update_ana_state()
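
nvme_update_ana_state() applies one ANA group descriptor to ctrl->namespaces; because both the descriptor's NSID array and the namespace list are kept sorted, a single merge-style pass suffices, skipping namespaces below the current NSID and advancing through the descriptor once a namespace is at or past it. A sketch of that walk over two sorted arrays (illustrative data, not the kernel structures):

/* Sketch of the merge-style walk in nvme_update_ana_state(): both the
 * descriptor's NSID array and the controller's namespace list are sorted,
 * so one pass over each suffices.  Arrays stand in for the kernel lists. */
#include <stdio.h>

int main(void)
{
	unsigned int desc_nsids[] = { 2, 5, 9 };	/* from the ANA log */
	unsigned int ns_ids[] = { 1, 2, 3, 5, 7, 9 };	/* ctrl->namespaces */
	size_t nr_desc = 3, nr_ns = 6, d = 0;

	for (size_t i = 0; i < nr_ns && d < nr_desc; i++) {
		/* Skip descriptor NSIDs with no matching local namespace. */
		while (d < nr_desc && desc_nsids[d] < ns_ids[i])
			d++;
		if (d < nr_desc && desc_nsids[d] == ns_ids[i]) {
			/* nvme_update_ns_ana_state() would run here. */
			printf("update nsid %u\n", ns_ids[i]);
			d++;
		}
		/* desc_nsids[d] > ns_ids[i]: namespace not in this group. */
	}
	return 0;
}
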
753 struct nvme_ns *ns = nvme_get_ns_from_dev(dev); in ana_state_show() local
755 return sysfs_emit(buf, "%s\n", nvme_ana_state_names[ns->ana_state]); in ana_state_show()
771 void nvme_mpath_add_disk(struct nvme_ns *ns, struct nvme_id_ns *id) in nvme_mpath_add_disk() argument
773 if (nvme_ctrl_use_ana(ns->ctrl)) { in nvme_mpath_add_disk()
779 mutex_lock(&ns->ctrl->ana_lock); in nvme_mpath_add_disk()
780 ns->ana_grpid = le32_to_cpu(id->anagrpid); in nvme_mpath_add_disk()
781 nvme_parse_ana_log(ns->ctrl, &desc, nvme_lookup_ana_group_desc); in nvme_mpath_add_disk()
782 mutex_unlock(&ns->ctrl->ana_lock); in nvme_mpath_add_disk()
785 nvme_update_ns_ana_state(&desc, ns); in nvme_mpath_add_disk()
788 set_bit(NVME_NS_ANA_PENDING, &ns->flags); in nvme_mpath_add_disk()
789 queue_work(nvme_wq, &ns->ctrl->ana_work); in nvme_mpath_add_disk()
792 ns->ana_state = NVME_ANA_OPTIMIZED; in nvme_mpath_add_disk()
793 nvme_mpath_set_live(ns); in nvme_mpath_add_disk()
796 if (blk_queue_stable_writes(ns->queue) && ns->head->disk) in nvme_mpath_add_disk()
798 ns->head->disk->queue); in nvme_mpath_add_disk()
800 if (blk_queue_is_zoned(ns->queue) && ns->head->disk) in nvme_mpath_add_disk()
801 ns->head->disk->queue->nr_zones = ns->queue->nr_zones; in nvme_mpath_add_disk()
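
With ANA enabled, nvme_mpath_add_disk() records the group id from Identify Namespace, looks the group up in the cached ANA log under ana_lock and applies the descriptor via nvme_update_ns_ana_state(); if the group is not in the log yet it sets NVME_NS_ANA_PENDING and queues ana_work, and without ANA the path simply defaults to NVME_ANA_OPTIMIZED and goes live. A sketch of that decision with a toy lookup_group() in place of nvme_parse_ana_log(); names and data are illustrative.

/* Sketch of the ANA-state decision in nvme_mpath_add_disk(). */
#include <stdbool.h>
#include <stdio.h>

enum ana_state { ANA_OPTIMIZED = 1, ANA_NONOPTIMIZED, ANA_INACCESSIBLE };

struct desc { unsigned int grpid; enum ana_state state; bool found; };

/* Toy stand-in for nvme_parse_ana_log() + the group-descriptor lookup. */
static struct desc lookup_group(unsigned int grpid)
{
	if (grpid == 1)
		return (struct desc){ .grpid = 1, .state = ANA_NONOPTIMIZED,
				      .found = true };
	return (struct desc){ .found = false };
}

int main(void)
{
	bool use_ana = true;		/* nvme_ctrl_use_ana(ns->ctrl) */
	unsigned int anagrpid = 1;	/* id->anagrpid from Identify Namespace */

	if (use_ana) {
		struct desc d = lookup_group(anagrpid);

		if (d.found)		/* nvme_update_ns_ana_state() */
			printf("group %u -> state %d, set live if optimized/non-optimized\n",
			       d.grpid, d.state);
		else			/* log has no such group yet */
			printf("mark NVME_NS_ANA_PENDING, queue ana_work\n");
	} else {
		/* No ANA: the path is always treated as optimized and goes live. */
		printf("state %d, set live\n", ANA_OPTIMIZED);
	}
	return 0;
}
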