Lines matching refs: mdsc. Each entry gives the source line number, the matching code, and the enclosing function; the code appears to be fs/ceph/mds_client.c from the Linux kernel, and the trailing argument/local tags mark whether mdsc is a function parameter or a local variable at that line.
61 static void __wake_requests(struct ceph_mds_client *mdsc,
678 struct ceph_mds_session *__ceph_lookup_mds_session(struct ceph_mds_client *mdsc, in __ceph_lookup_mds_session() argument
681 if (mds >= mdsc->max_sessions || !mdsc->sessions[mds]) in __ceph_lookup_mds_session()
683 return ceph_get_mds_session(mdsc->sessions[mds]); in __ceph_lookup_mds_session()
686 static bool __have_session(struct ceph_mds_client *mdsc, int mds) in __have_session() argument
688 if (mds >= mdsc->max_sessions || !mdsc->sessions[mds]) in __have_session()
694 static int __verify_registered_session(struct ceph_mds_client *mdsc, in __verify_registered_session() argument
697 if (s->s_mds >= mdsc->max_sessions || in __verify_registered_session()
698 mdsc->sessions[s->s_mds] != s) in __verify_registered_session()
707 static struct ceph_mds_session *register_session(struct ceph_mds_client *mdsc, in register_session() argument
712 if (mds >= mdsc->mdsmap->possible_max_rank) in register_session()
719 if (mds >= mdsc->max_sessions) { in register_session()
727 if (mdsc->sessions) { in register_session()
728 memcpy(sa, mdsc->sessions, in register_session()
729 mdsc->max_sessions * sizeof(void *)); in register_session()
730 kfree(mdsc->sessions); in register_session()
732 mdsc->sessions = sa; in register_session()
733 mdsc->max_sessions = newmax; in register_session()
737 s->s_mdsc = mdsc; in register_session()
742 ceph_con_init(&s->s_con, s, &mds_con_ops, &mdsc->fsc->client->msgr); in register_session()
759 mdsc->sessions[mds] = s; in register_session()
760 atomic_inc(&mdsc->num_sessions); in register_session()
764 ceph_mdsmap_get_addr(mdsc->mdsmap, mds)); in register_session()
776 static void __unregister_session(struct ceph_mds_client *mdsc, in __unregister_session() argument
780 BUG_ON(mdsc->sessions[s->s_mds] != s); in __unregister_session()
781 mdsc->sessions[s->s_mds] = NULL; in __unregister_session()
784 atomic_dec(&mdsc->num_sessions); in __unregister_session()
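The register_session()/__unregister_session() lines above show the session table pattern: mdsc->sessions is a flat pointer array indexed by MDS rank, grown on demand (copy the old pointers, free the old array) and cleared slot-by-slot on unregister, with num_sessions counted atomically. A minimal userspace sketch of that grow-and-copy step follows; the names (session_table, ensure_capacity) and the power-of-two growth policy are illustrative assumptions, not the kernel API.

#include <stdlib.h>
#include <string.h>

struct session_table {
    void **slots;      /* plays the role of mdsc->sessions     */
    int    max_slots;  /* plays the role of mdsc->max_sessions */
};

/* Grow the table so that index 'mds' fits, preserving the existing pointers,
 * as the memcpy()/kfree() sequence in register_session() does above. */
static int ensure_capacity(struct session_table *t, int mds)
{
    if (mds < t->max_slots)
        return 0;
    int newmax = t->max_slots ? t->max_slots : 1;
    while (newmax <= mds)
        newmax *= 2;                       /* assumed growth policy */
    void **sa = calloc(newmax, sizeof(void *));
    if (!sa)
        return -1;
    if (t->slots) {
        memcpy(sa, t->slots, t->max_slots * sizeof(void *));
        free(t->slots);
    }
    t->slots = sa;
    t->max_slots = newmax;
    return 0;
}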
800 void ceph_mdsc_iterate_sessions(struct ceph_mds_client *mdsc, in ceph_mdsc_iterate_sessions() argument
806 mutex_lock(&mdsc->mutex); in ceph_mdsc_iterate_sessions()
807 for (mds = 0; mds < mdsc->max_sessions; ++mds) { in ceph_mdsc_iterate_sessions()
810 s = __ceph_lookup_mds_session(mdsc, mds); in ceph_mdsc_iterate_sessions()
819 mutex_unlock(&mdsc->mutex); in ceph_mdsc_iterate_sessions()
822 mutex_lock(&mdsc->mutex); in ceph_mdsc_iterate_sessions()
824 mutex_unlock(&mdsc->mutex); in ceph_mdsc_iterate_sessions()
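ceph_mdsc_iterate_sessions() above walks the table under mdsc->mutex, takes a reference on each session, drops the mutex around the callback, and re-takes it before continuing. A small pthread-based sketch of that "drop the lock around the callback" shape follows; it deliberately omits the refcounting the kernel does before unlocking, and all names here are made up.

#include <pthread.h>

struct table {
    pthread_mutex_t lock;   /* stands in for mdsc->mutex */
    void **slots;
    int max_slots;
};

static void for_each_slot(struct table *t, void (*cb)(void *item))
{
    pthread_mutex_lock(&t->lock);
    for (int i = 0; i < t->max_slots; i++) {
        void *item = t->slots[i];        /* the kernel also grabs a reference here */
        if (!item)
            continue;
        pthread_mutex_unlock(&t->lock);  /* never run the callback with the table lock held */
        cb(item);
        pthread_mutex_lock(&t->lock);    /* re-acquire before touching the table again */
    }
    pthread_mutex_unlock(&t->lock);
}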
881 lookup_get_request(struct ceph_mds_client *mdsc, u64 tid) in lookup_get_request() argument
885 req = lookup_request(&mdsc->request_tree, tid); in lookup_get_request()
898 static void __register_request(struct ceph_mds_client *mdsc, in __register_request() argument
904 req->r_tid = ++mdsc->last_tid; in __register_request()
906 ret = ceph_reserve_caps(mdsc, &req->r_caps_reservation, in __register_request()
918 insert_request(&mdsc->request_tree, req); in __register_request()
922 if (mdsc->oldest_tid == 0 && req->r_op != CEPH_MDS_OP_SETFILELOCK) in __register_request()
923 mdsc->oldest_tid = req->r_tid; in __register_request()
936 static void __unregister_request(struct ceph_mds_client *mdsc, in __unregister_request() argument
944 if (req->r_tid == mdsc->oldest_tid) { in __unregister_request()
946 mdsc->oldest_tid = 0; in __unregister_request()
951 mdsc->oldest_tid = next_req->r_tid; in __unregister_request()
958 erase_request(&mdsc->request_tree, req); in __unregister_request()
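The __register_request()/__unregister_request() lines show the request bookkeeping: each request gets a monotonically increasing tid (++mdsc->last_tid), is inserted into the tid-keyed request_tree, and mdsc->oldest_tid tracks the oldest tracked tid, advancing when that request is unregistered. A sketch of the same bookkeeping follows, with a sorted singly linked list standing in for the kernel's rb-tree; it also omits the SETFILELOCK exception visible in the listing, and every name is illustrative.

#include <stdint.h>
#include <stddef.h>

struct req { uint64_t tid; struct req *next; };

struct client {
    uint64_t last_tid;    /* like mdsc->last_tid, monotonically increasing    */
    uint64_t oldest_tid;  /* like mdsc->oldest_tid, 0 means nothing tracked   */
    struct req *head;     /* ascending by tid, stands in for request_tree     */
};

static void register_req(struct client *c, struct req *r)
{
    r->tid = ++c->last_tid;              /* new tids always sort last */
    r->next = NULL;
    struct req **pp = &c->head;
    while (*pp)
        pp = &(*pp)->next;
    *pp = r;
    if (c->oldest_tid == 0)
        c->oldest_tid = r->tid;          /* first tracked request becomes the oldest */
}

static void unregister_req(struct client *c, struct req *r)
{
    struct req **pp = &c->head;
    while (*pp && *pp != r)
        pp = &(*pp)->next;
    if (*pp)
        *pp = r->next;
    if (r->tid == c->oldest_tid)         /* advance oldest_tid past the removed entry */
        c->oldest_tid = c->head ? c->head->tid : 0;
}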
1014 static int __choose_mds(struct ceph_mds_client *mdsc, in __choose_mds() argument
1034 (__have_session(mdsc, req->r_resend_mds) || in __choose_mds()
1035 ceph_mdsmap_get_state(mdsc->mdsmap, req->r_resend_mds) > 0)) { in __choose_mds()
1065 if (!dir || dir->i_sb != mdsc->fsc->sb) { in __choose_mds()
1112 if (ceph_mdsmap_get_state(mdsc->mdsmap, mds) >= in __choose_mds()
1114 !ceph_mdsmap_is_laggy(mdsc->mdsmap, mds)) in __choose_mds()
1127 if (ceph_mdsmap_get_state(mdsc->mdsmap, mds) >= in __choose_mds()
1129 if (!ceph_mdsmap_is_laggy(mdsc->mdsmap, in __choose_mds()
1162 mds = ceph_mdsmap_get_random_mds(mdsc->mdsmap); in __choose_mds()
1264 static struct ceph_msg *create_session_open_msg(struct ceph_mds_client *mdsc, u64 seq) in create_session_open_msg() argument
1271 struct ceph_options *opt = mdsc->fsc->client->options; in create_session_open_msg()
1272 struct ceph_mount_options *fsopt = mdsc->fsc->mount_options; in create_session_open_msg()
1278 {"hostname", mdsc->nodename}, in create_session_open_msg()
1374 static int __open_session(struct ceph_mds_client *mdsc, in __open_session() argument
1382 mstate = ceph_mdsmap_get_state(mdsc->mdsmap, mds); in __open_session()
1389 msg = create_session_open_msg(mdsc, session->s_seq); in __open_session()
1402 __open_export_target_session(struct ceph_mds_client *mdsc, int target) in __open_export_target_session() argument
1407 session = __ceph_lookup_mds_session(mdsc, target); in __open_export_target_session()
1409 session = register_session(mdsc, target); in __open_export_target_session()
1415 ret = __open_session(mdsc, session); in __open_export_target_session()
1424 ceph_mdsc_open_export_target_session(struct ceph_mds_client *mdsc, int target) in ceph_mdsc_open_export_target_session() argument
1430 mutex_lock(&mdsc->mutex); in ceph_mdsc_open_export_target_session()
1431 session = __open_export_target_session(mdsc, target); in ceph_mdsc_open_export_target_session()
1432 mutex_unlock(&mdsc->mutex); in ceph_mdsc_open_export_target_session()
1437 static void __open_export_target_sessions(struct ceph_mds_client *mdsc, in __open_export_target_sessions() argument
1444 if (mds >= mdsc->mdsmap->possible_max_rank) in __open_export_target_sessions()
1447 mi = &mdsc->mdsmap->m_info[mds]; in __open_export_target_sessions()
1452 ts = __open_export_target_session(mdsc, mi->export_targets[i]); in __open_export_target_sessions()
1457 void ceph_mdsc_open_export_target_sessions(struct ceph_mds_client *mdsc, in ceph_mdsc_open_export_target_sessions() argument
1460 mutex_lock(&mdsc->mutex); in ceph_mdsc_open_export_target_sessions()
1461 __open_export_target_sessions(mdsc, session); in ceph_mdsc_open_export_target_sessions()
1462 mutex_unlock(&mdsc->mutex); in ceph_mdsc_open_export_target_sessions()
1479 static void dispose_cap_releases(struct ceph_mds_client *mdsc, in dispose_cap_releases() argument
1487 ceph_put_cap(mdsc, cap); in dispose_cap_releases()
1491 static void cleanup_session_requests(struct ceph_mds_client *mdsc, in cleanup_session_requests() argument
1498 mutex_lock(&mdsc->mutex); in cleanup_session_requests()
1508 __unregister_request(mdsc, req); in cleanup_session_requests()
1511 p = rb_first(&mdsc->request_tree); in cleanup_session_requests()
1519 mutex_unlock(&mdsc->mutex); in cleanup_session_requests()
1626 wake_up_all(&fsc->mdsc->cap_flushing_wq); in remove_session_caps()
1714 static int send_renew_caps(struct ceph_mds_client *mdsc, in send_renew_caps() argument
1727 state = ceph_mdsmap_get_state(mdsc->mdsmap, session->s_mds); in send_renew_caps()
1744 static int send_flushmsg_ack(struct ceph_mds_client *mdsc, in send_flushmsg_ack() argument
1764 static void renewed_caps(struct ceph_mds_client *mdsc, in renewed_caps() argument
1774 mdsc->mdsmap->m_session_timeout*HZ; in renewed_caps()
1814 static int __close_session(struct ceph_mds_client *mdsc, in __close_session() argument
1929 int ceph_trim_caps(struct ceph_mds_client *mdsc, in ceph_trim_caps() argument
1946 ceph_flush_cap_releases(mdsc, session); in ceph_trim_caps()
1950 static int check_caps_flush(struct ceph_mds_client *mdsc, in check_caps_flush() argument
1955 spin_lock(&mdsc->cap_dirty_lock); in check_caps_flush()
1956 if (!list_empty(&mdsc->cap_flush_list)) { in check_caps_flush()
1958 list_first_entry(&mdsc->cap_flush_list, in check_caps_flush()
1966 spin_unlock(&mdsc->cap_dirty_lock); in check_caps_flush()
1975 static void wait_caps_flush(struct ceph_mds_client *mdsc, in wait_caps_flush() argument
1980 wait_event(mdsc->cap_flushing_wq, in wait_caps_flush()
1981 check_caps_flush(mdsc, want_flush_tid)); in wait_caps_flush()
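wait_caps_flush()/check_caps_flush() above sleep until the oldest still-pending cap flush tid has moved past the tid the caller wants flushed (or the flush list is empty), checking the condition under cap_dirty_lock and sleeping on cap_flushing_wq. A userspace condition-variable sketch of that idiom follows; the struct and function names are made up and the predicate is my reading of the listing, not a verbatim copy of the kernel logic.

#include <pthread.h>
#include <stdint.h>

struct flusher {
    pthread_mutex_t lock;     /* stands in for mdsc->cap_dirty_lock   */
    pthread_cond_t  cond;     /* stands in for mdsc->cap_flushing_wq  */
    uint64_t oldest_pending;  /* 0 when the flush list would be empty */
};

/* Block until every flush with tid <= want_tid has completed. */
static void wait_flush(struct flusher *f, uint64_t want_tid)
{
    pthread_mutex_lock(&f->lock);
    while (f->oldest_pending != 0 && f->oldest_pending <= want_tid)
        pthread_cond_wait(&f->cond, &f->lock);
    pthread_mutex_unlock(&f->lock);
}

/* Called by the flushing side when its queue advances; wakes all waiters,
 * much like wake_up_all(&fsc->mdsc->cap_flushing_wq) in the listing. */
static void flush_advanced(struct flusher *f, uint64_t new_oldest_or_zero)
{
    pthread_mutex_lock(&f->lock);
    f->oldest_pending = new_oldest_or_zero;
    pthread_cond_broadcast(&f->cond);
    pthread_mutex_unlock(&f->lock);
}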
1989 static void ceph_send_cap_releases(struct ceph_mds_client *mdsc, in ceph_send_cap_releases() argument
1995 struct ceph_osd_client *osdc = &mdsc->fsc->client->osdc; in ceph_send_cap_releases()
2041 ceph_put_cap(mdsc, cap); in ceph_send_cap_releases()
2096 void ceph_flush_cap_releases(struct ceph_mds_client *mdsc, in ceph_flush_cap_releases() argument
2099 if (mdsc->stopping) in ceph_flush_cap_releases()
2103 if (queue_work(mdsc->fsc->cap_wq, in ceph_flush_cap_releases()
2127 struct ceph_mds_client *mdsc = in ceph_cap_reclaim_work() local
2129 int ret = ceph_trim_dentries(mdsc); in ceph_cap_reclaim_work()
2131 ceph_queue_cap_reclaim_work(mdsc); in ceph_cap_reclaim_work()
2134 void ceph_queue_cap_reclaim_work(struct ceph_mds_client *mdsc) in ceph_queue_cap_reclaim_work() argument
2136 if (mdsc->stopping) in ceph_queue_cap_reclaim_work()
2139 if (queue_work(mdsc->fsc->cap_wq, &mdsc->cap_reclaim_work)) { in ceph_queue_cap_reclaim_work()
2146 void ceph_reclaim_caps_nr(struct ceph_mds_client *mdsc, int nr) in ceph_reclaim_caps_nr() argument
2151 val = atomic_add_return(nr, &mdsc->cap_reclaim_pending); in ceph_reclaim_caps_nr()
2153 atomic_set(&mdsc->cap_reclaim_pending, 0); in ceph_reclaim_caps_nr()
2154 ceph_queue_cap_reclaim_work(mdsc); in ceph_reclaim_caps_nr()
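ceph_reclaim_caps_nr() above batches reclaim notifications: it atomically accumulates a pending count and only queues the reclaim work once enough has piled up, then resets the counter. The C11 sketch below shows that shape in userspace; the threshold value and the exact trigger condition are assumptions (the kernel's test differs slightly), and queue_reclaim_work() is a stub standing in for ceph_queue_cap_reclaim_work().

#include <stdatomic.h>
#include <stdio.h>

#define RECLAIM_BATCH 256                /* illustrative threshold only */

static atomic_int reclaim_pending;       /* like mdsc->cap_reclaim_pending */

static void queue_reclaim_work(void)     /* stub for ceph_queue_cap_reclaim_work() */
{
    printf("reclaim work queued\n");
}

static void note_reclaimable(int nr)
{
    if (!nr)
        return;
    int val = atomic_fetch_add(&reclaim_pending, nr) + nr;
    if (val >= RECLAIM_BATCH) {
        atomic_store(&reclaim_pending, 0);   /* reset the batch, then kick the worker */
        queue_reclaim_work();
    }
}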
2204 ceph_mdsc_create_request(struct ceph_mds_client *mdsc, int op, int mode) in ceph_mdsc_create_request() argument
2213 req->r_mdsc = mdsc; in ceph_mdsc_create_request()
2239 static struct ceph_mds_request *__get_oldest_req(struct ceph_mds_client *mdsc) in __get_oldest_req() argument
2241 if (RB_EMPTY_ROOT(&mdsc->request_tree)) in __get_oldest_req()
2243 return rb_entry(rb_first(&mdsc->request_tree), in __get_oldest_req()
2247 static inline u64 __get_oldest_tid(struct ceph_mds_client *mdsc) in __get_oldest_tid() argument
2249 return mdsc->oldest_tid; in __get_oldest_tid()
2442 struct ceph_mds_client *mdsc = session->s_mdsc; in create_request_message() local
2518 head->mdsmap_epoch = cpu_to_le32(mdsc->mdsmap->m_epoch); in create_request_message()
2596 static void complete_request(struct ceph_mds_client *mdsc, in complete_request() argument
2602 req->r_callback(mdsc, req); in complete_request()
2626 struct ceph_mds_client *mdsc = session->s_mdsc; in __prepare_send_request() local
2690 rhead->oldest_client_tid = cpu_to_le64(__get_oldest_tid(mdsc)); in __prepare_send_request()
2726 static void __do_request(struct ceph_mds_client *mdsc, in __do_request() argument
2736 __unregister_request(mdsc, req); in __do_request()
2746 if (READ_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_SHUTDOWN) { in __do_request()
2751 if (READ_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_MOUNTING) { in __do_request()
2752 if (mdsc->mdsmap_err) { in __do_request()
2753 err = mdsc->mdsmap_err; in __do_request()
2757 if (mdsc->mdsmap->m_epoch == 0) { in __do_request()
2759 list_add(&req->r_wait, &mdsc->waiting_for_map); in __do_request()
2762 if (!(mdsc->fsc->mount_options->flags & in __do_request()
2764 !ceph_mdsmap_is_cluster_available(mdsc->mdsmap)) { in __do_request()
2772 mds = __choose_mds(mdsc, req, &random); in __do_request()
2774 ceph_mdsmap_get_state(mdsc->mdsmap, mds) < CEPH_MDS_STATE_ACTIVE) { in __do_request()
2780 list_add(&req->r_wait, &mdsc->waiting_for_map); in __do_request()
2785 session = __ceph_lookup_mds_session(mdsc, mds); in __do_request()
2787 session = register_session(mdsc, mds); in __do_request()
2815 if (ceph_test_mount_opt(mdsc->fsc, CLEANRECOVER)) in __do_request()
2816 list_add(&req->r_wait, &mdsc->waiting_for_map); in __do_request()
2824 err = __open_session(mdsc, session); in __do_request()
2849 complete_request(mdsc, req); in __do_request()
2850 __unregister_request(mdsc, req); in __do_request()
2858 static void __wake_requests(struct ceph_mds_client *mdsc, in __wake_requests() argument
2871 __do_request(mdsc, req); in __wake_requests()
2879 static void kick_requests(struct ceph_mds_client *mdsc, int mds) in kick_requests() argument
2882 struct rb_node *p = rb_first(&mdsc->request_tree); in kick_requests()
2896 __do_request(mdsc, req); in kick_requests()
2901 int ceph_mdsc_submit_request(struct ceph_mds_client *mdsc, struct inode *dir, in ceph_mdsc_submit_request() argument
2915 __ceph_touch_fmode(ci, mdsc, fmode); in ceph_mdsc_submit_request()
2941 mutex_lock(&mdsc->mutex); in ceph_mdsc_submit_request()
2942 __register_request(mdsc, req, dir); in ceph_mdsc_submit_request()
2943 __do_request(mdsc, req); in ceph_mdsc_submit_request()
2945 mutex_unlock(&mdsc->mutex); in ceph_mdsc_submit_request()
2949 static int ceph_mdsc_wait_request(struct ceph_mds_client *mdsc, in ceph_mdsc_wait_request() argument
2957 err = req->r_wait_for_completion(mdsc, req); in ceph_mdsc_wait_request()
2970 mutex_lock(&mdsc->mutex); in ceph_mdsc_wait_request()
2995 mutex_unlock(&mdsc->mutex); in ceph_mdsc_wait_request()
3003 int ceph_mdsc_do_request(struct ceph_mds_client *mdsc, in ceph_mdsc_do_request() argument
3012 err = ceph_mdsc_submit_request(mdsc, dir, req); in ceph_mdsc_do_request()
3014 err = ceph_mdsc_wait_request(mdsc, req); in ceph_mdsc_do_request()
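ceph_mdsc_do_request() above composes the two halves visible in the listing: submission registers the request and kicks __do_request() under mdsc->mutex, and the wait side blocks (via req->r_wait_for_completion or a completion) until the reply path finishes the request. The sketch below is only a userspace stand-in for that completion handshake, not the kernel API; struct waiter and its functions are invented names.

#include <pthread.h>
#include <stdbool.h>

struct waiter {
    pthread_mutex_t lock;
    pthread_cond_t  cond;
    bool done;
    int  result;
};

/* Reply-handler side: record the result and wake the submitter. */
static void waiter_complete(struct waiter *w, int result)
{
    pthread_mutex_lock(&w->lock);
    w->result = result;
    w->done = true;
    pthread_cond_broadcast(&w->cond);
    pthread_mutex_unlock(&w->lock);
}

/* Submitter side: block until the reply handler has completed the request. */
static int waiter_wait(struct waiter *w)
{
    pthread_mutex_lock(&w->lock);
    while (!w->done)
        pthread_cond_wait(&w->cond, &w->lock);
    int r = w->result;
    pthread_mutex_unlock(&w->lock);
    return r;
}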
3048 struct ceph_mds_client *mdsc = session->s_mdsc; in handle_reply() local
3065 mutex_lock(&mdsc->mutex); in handle_reply()
3066 req = lookup_get_request(mdsc, tid); in handle_reply()
3069 mutex_unlock(&mdsc->mutex); in handle_reply()
3079 mutex_unlock(&mdsc->mutex); in handle_reply()
3088 mutex_unlock(&mdsc->mutex); in handle_reply()
3094 mutex_unlock(&mdsc->mutex); in handle_reply()
3113 __do_request(mdsc, req); in handle_reply()
3114 mutex_unlock(&mdsc->mutex); in handle_reply()
3117 int mds = __choose_mds(mdsc, req, NULL); in handle_reply()
3120 __do_request(mdsc, req); in handle_reply()
3121 mutex_unlock(&mdsc->mutex); in handle_reply()
3131 __unregister_request(mdsc, req); in handle_reply()
3134 if (mdsc->stopping && !__get_oldest_req(mdsc)) in handle_reply()
3135 complete_all(&mdsc->safe_umount_waiters); in handle_reply()
3147 mutex_unlock(&mdsc->mutex); in handle_reply()
3161 mutex_unlock(&mdsc->mutex); in handle_reply()
3171 in = ceph_get_inode(mdsc->fsc->sb, tvino); in handle_reply()
3190 down_write(&mdsc->snap_rwsem); in handle_reply()
3191 ceph_update_snap_trace(mdsc, rinfo->snapblob, in handle_reply()
3195 downgrade_write(&mdsc->snap_rwsem); in handle_reply()
3197 down_read(&mdsc->snap_rwsem); in handle_reply()
3203 err = ceph_fill_trace(mdsc->fsc->sb, req); in handle_reply()
3212 up_read(&mdsc->snap_rwsem); in handle_reply()
3214 ceph_put_snap_realm(mdsc, realm); in handle_reply()
3227 ceph_unreserve_caps(mdsc, &req->r_caps_reservation); in handle_reply()
3230 mutex_lock(&mdsc->mutex); in handle_reply()
3241 mutex_unlock(&mdsc->mutex); in handle_reply()
3246 complete_request(mdsc, req); in handle_reply()
3248 ceph_update_metadata_metrics(&mdsc->metric, req->r_start_latency, in handle_reply()
3260 static void handle_forward(struct ceph_mds_client *mdsc, in handle_forward() argument
3276 mutex_lock(&mdsc->mutex); in handle_forward()
3277 req = lookup_get_request(mdsc, tid); in handle_forward()
3285 __unregister_request(mdsc, req); in handle_forward()
3298 __do_request(mdsc, req); in handle_forward()
3302 mutex_unlock(&mdsc->mutex); in handle_forward()
3343 struct ceph_mds_client *mdsc = session->s_mdsc; in handle_session() local
3392 mutex_lock(&mdsc->mutex); in handle_session()
3395 __unregister_session(mdsc, session); in handle_session()
3398 session->s_ttl = jiffies + HZ*mdsc->mdsmap->m_session_autoclose; in handle_session()
3399 mutex_unlock(&mdsc->mutex); in handle_session()
3418 renewed_caps(mdsc, session, 0); in handle_session()
3420 metric_schedule_delayed(&mdsc->metric); in handle_session()
3422 if (mdsc->stopping) in handle_session()
3423 __close_session(mdsc, session); in handle_session()
3428 renewed_caps(mdsc, session, 1); in handle_session()
3435 cleanup_session_requests(mdsc, session); in handle_session()
3438 wake_up_all(&mdsc->session_close_wq); in handle_session()
3446 send_renew_caps(mdsc, session); in handle_session()
3450 ceph_trim_caps(mdsc, session, le32_to_cpu(h->max_caps)); in handle_session()
3454 send_flushmsg_ack(mdsc, session, seq); in handle_session()
3469 cleanup_session_requests(mdsc, session); in handle_session()
3472 mdsc->fsc->blocklisted = true; in handle_session()
3483 mutex_lock(&mdsc->mutex); in handle_session()
3484 __wake_requests(mdsc, &session->s_waiting); in handle_session()
3486 kick_requests(mdsc, mds); in handle_session()
3487 mutex_unlock(&mdsc->mutex); in handle_session()
3526 static void replay_unsafe_requests(struct ceph_mds_client *mdsc, in replay_unsafe_requests() argument
3534 mutex_lock(&mdsc->mutex); in replay_unsafe_requests()
3542 p = rb_first(&mdsc->request_tree); in replay_unsafe_requests()
3559 mutex_unlock(&mdsc->mutex); in replay_unsafe_requests()
3853 static int encode_snap_realms(struct ceph_mds_client *mdsc, in encode_snap_realms() argument
3861 err = ceph_pagelist_encode_32(pagelist, mdsc->num_snap_realms); in encode_snap_realms()
3871 for (p = rb_first(&mdsc->snap_realms); p; p = rb_next(p)) { in encode_snap_realms()
3923 static void send_mds_reconnect(struct ceph_mds_client *mdsc, in send_mds_reconnect() argument
3967 dispose_cap_releases(mdsc, &dispose); in send_mds_reconnect()
3970 if (mdsc->fsc->sb->s_root) in send_mds_reconnect()
3971 shrink_dcache_parent(mdsc->fsc->sb->s_root); in send_mds_reconnect()
3976 ceph_mdsmap_get_addr(mdsc->mdsmap, mds)); in send_mds_reconnect()
3979 replay_unsafe_requests(mdsc, session); in send_mds_reconnect()
3981 ceph_early_kick_flushing_caps(mdsc, session); in send_mds_reconnect()
3983 down_read(&mdsc->snap_rwsem); in send_mds_reconnect()
4009 if (mdsc->num_snap_realms) { in send_mds_reconnect()
4012 mdsc->num_snap_realms * in send_mds_reconnect()
4018 total_len += mdsc->num_snap_realms * in send_mds_reconnect()
4035 err = encode_snap_realms(mdsc, &recon_state); in send_mds_reconnect()
4051 WARN_ON(recon_state.nr_realms != mdsc->num_snap_realms); in send_mds_reconnect()
4070 mutex_lock(&mdsc->mutex); in send_mds_reconnect()
4071 __wake_requests(mdsc, &session->s_waiting); in send_mds_reconnect()
4072 mutex_unlock(&mdsc->mutex); in send_mds_reconnect()
4074 up_read(&mdsc->snap_rwsem); in send_mds_reconnect()
4080 up_read(&mdsc->snap_rwsem); in send_mds_reconnect()
4096 static void check_new_map(struct ceph_mds_client *mdsc, in check_new_map() argument
4115 for (i = 0; i < oldmap->possible_max_rank && i < mdsc->max_sessions; i++) { in check_new_map()
4116 if (!mdsc->sessions[i]) in check_new_map()
4118 s = mdsc->sessions[i]; in check_new_map()
4132 __unregister_session(mdsc, s); in check_new_map()
4133 __wake_requests(mdsc, &s->s_waiting); in check_new_map()
4134 mutex_unlock(&mdsc->mutex); in check_new_map()
4137 cleanup_session_requests(mdsc, s); in check_new_map()
4143 mutex_lock(&mdsc->mutex); in check_new_map()
4144 kick_requests(mdsc, i); in check_new_map()
4152 mutex_unlock(&mdsc->mutex); in check_new_map()
4154 mutex_lock(&mdsc->mutex); in check_new_map()
4167 mutex_unlock(&mdsc->mutex); in check_new_map()
4169 send_mds_reconnect(mdsc, s); in check_new_map()
4170 mutex_lock(&mdsc->mutex); in check_new_map()
4181 kick_requests(mdsc, i); in check_new_map()
4182 mutex_unlock(&mdsc->mutex); in check_new_map()
4184 mutex_lock(&mdsc->mutex); in check_new_map()
4185 ceph_kick_flushing_caps(mdsc, s); in check_new_map()
4219 s = __ceph_lookup_mds_session(mdsc, i); in check_new_map()
4221 s = __open_export_target_session(mdsc, i); in check_new_map()
4230 mutex_unlock(&mdsc->mutex); in check_new_map()
4231 send_mds_reconnect(mdsc, s); in check_new_map()
4233 mutex_lock(&mdsc->mutex); in check_new_map()
4236 for (i = 0; i < newmap->possible_max_rank && i < mdsc->max_sessions; i++) { in check_new_map()
4237 s = mdsc->sessions[i]; in check_new_map()
4247 __open_export_target_sessions(mdsc, s); in check_new_map()
4269 static void handle_lease(struct ceph_mds_client *mdsc, in handle_lease() argument
4273 struct super_block *sb = mdsc->fsc->sb; in handle_lease()
4421 static void maybe_recover_session(struct ceph_mds_client *mdsc) in maybe_recover_session() argument
4423 struct ceph_fs_client *fsc = mdsc->fsc; in maybe_recover_session()
4490 static void schedule_delayed(struct ceph_mds_client *mdsc, unsigned long delay) in schedule_delayed() argument
4497 schedule_delayed_work(&mdsc->delayed_work, in schedule_delayed()
4503 struct ceph_mds_client *mdsc = in delayed_work() local
4512 if (mdsc->stopping) in delayed_work()
4515 mutex_lock(&mdsc->mutex); in delayed_work()
4516 renew_interval = mdsc->mdsmap->m_session_timeout >> 2; in delayed_work()
4518 mdsc->last_renew_caps); in delayed_work()
4520 mdsc->last_renew_caps = jiffies; in delayed_work()
4522 for (i = 0; i < mdsc->max_sessions; i++) { in delayed_work()
4523 struct ceph_mds_session *s = __ceph_lookup_mds_session(mdsc, i); in delayed_work()
4531 mutex_unlock(&mdsc->mutex); in delayed_work()
4535 send_renew_caps(mdsc, s); in delayed_work()
4540 ceph_send_cap_releases(mdsc, s); in delayed_work()
4544 mutex_lock(&mdsc->mutex); in delayed_work()
4546 mutex_unlock(&mdsc->mutex); in delayed_work()
4548 delay = ceph_check_delayed_caps(mdsc); in delayed_work()
4550 ceph_queue_cap_reclaim_work(mdsc); in delayed_work()
4552 ceph_trim_snapid_map(mdsc); in delayed_work()
4554 maybe_recover_session(mdsc); in delayed_work()
4556 schedule_delayed(mdsc, delay); in delayed_work()
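delayed_work()/schedule_delayed() above form a self-rearming maintenance job: do one pass (renew caps, send cap releases, trim caches), compute the next delay, and reschedule yourself, bailing out when mdsc->stopping is set. A plain userspace loop sketching that shape follows; the interval and all names are illustrative, and a thread plus sleep() stands in for the kernel's delayed workqueue.

#include <stdbool.h>
#include <unistd.h>

static volatile bool stopping;            /* plays the role of mdsc->stopping */

static void maintenance_pass(void)
{
    /* renewing caps, sending cap releases, trimming the snapid map and so on
     * would happen here, as the delayed_work() lines above suggest */
}

static void *delayed_worker(void *arg)
{
    (void)arg;
    unsigned int delay_secs = 5;          /* illustrative interval only */
    while (!stopping) {
        sleep(delay_secs);                /* stands in for schedule_delayed()   */
        if (stopping)                     /* the kernel also re-checks stopping */
            break;
        maintenance_pass();
    }
    return NULL;
}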
4562 struct ceph_mds_client *mdsc; in ceph_mdsc_init() local
4565 mdsc = kzalloc(sizeof(struct ceph_mds_client), GFP_NOFS); in ceph_mdsc_init()
4566 if (!mdsc) in ceph_mdsc_init()
4568 mdsc->fsc = fsc; in ceph_mdsc_init()
4569 mutex_init(&mdsc->mutex); in ceph_mdsc_init()
4570 mdsc->mdsmap = kzalloc(sizeof(*mdsc->mdsmap), GFP_NOFS); in ceph_mdsc_init()
4571 if (!mdsc->mdsmap) { in ceph_mdsc_init()
4576 init_completion(&mdsc->safe_umount_waiters); in ceph_mdsc_init()
4577 init_waitqueue_head(&mdsc->session_close_wq); in ceph_mdsc_init()
4578 INIT_LIST_HEAD(&mdsc->waiting_for_map); in ceph_mdsc_init()
4579 mdsc->quotarealms_inodes = RB_ROOT; in ceph_mdsc_init()
4580 mutex_init(&mdsc->quotarealms_inodes_mutex); in ceph_mdsc_init()
4581 init_rwsem(&mdsc->snap_rwsem); in ceph_mdsc_init()
4582 mdsc->snap_realms = RB_ROOT; in ceph_mdsc_init()
4583 INIT_LIST_HEAD(&mdsc->snap_empty); in ceph_mdsc_init()
4584 spin_lock_init(&mdsc->snap_empty_lock); in ceph_mdsc_init()
4585 mdsc->request_tree = RB_ROOT; in ceph_mdsc_init()
4586 INIT_DELAYED_WORK(&mdsc->delayed_work, delayed_work); in ceph_mdsc_init()
4587 mdsc->last_renew_caps = jiffies; in ceph_mdsc_init()
4588 INIT_LIST_HEAD(&mdsc->cap_delay_list); in ceph_mdsc_init()
4589 INIT_LIST_HEAD(&mdsc->cap_wait_list); in ceph_mdsc_init()
4590 spin_lock_init(&mdsc->cap_delay_lock); in ceph_mdsc_init()
4591 INIT_LIST_HEAD(&mdsc->snap_flush_list); in ceph_mdsc_init()
4592 spin_lock_init(&mdsc->snap_flush_lock); in ceph_mdsc_init()
4593 mdsc->last_cap_flush_tid = 1; in ceph_mdsc_init()
4594 INIT_LIST_HEAD(&mdsc->cap_flush_list); in ceph_mdsc_init()
4595 INIT_LIST_HEAD(&mdsc->cap_dirty_migrating); in ceph_mdsc_init()
4596 spin_lock_init(&mdsc->cap_dirty_lock); in ceph_mdsc_init()
4597 init_waitqueue_head(&mdsc->cap_flushing_wq); in ceph_mdsc_init()
4598 INIT_WORK(&mdsc->cap_reclaim_work, ceph_cap_reclaim_work); in ceph_mdsc_init()
4599 err = ceph_metric_init(&mdsc->metric); in ceph_mdsc_init()
4603 spin_lock_init(&mdsc->dentry_list_lock); in ceph_mdsc_init()
4604 INIT_LIST_HEAD(&mdsc->dentry_leases); in ceph_mdsc_init()
4605 INIT_LIST_HEAD(&mdsc->dentry_dir_leases); in ceph_mdsc_init()
4607 ceph_caps_init(mdsc); in ceph_mdsc_init()
4608 ceph_adjust_caps_max_min(mdsc, fsc->mount_options); in ceph_mdsc_init()
4610 spin_lock_init(&mdsc->snapid_map_lock); in ceph_mdsc_init()
4611 mdsc->snapid_map_tree = RB_ROOT; in ceph_mdsc_init()
4612 INIT_LIST_HEAD(&mdsc->snapid_map_lru); in ceph_mdsc_init()
4614 init_rwsem(&mdsc->pool_perm_rwsem); in ceph_mdsc_init()
4615 mdsc->pool_perm_tree = RB_ROOT; in ceph_mdsc_init()
4617 strscpy(mdsc->nodename, utsname()->nodename, in ceph_mdsc_init()
4618 sizeof(mdsc->nodename)); in ceph_mdsc_init()
4620 fsc->mdsc = mdsc; in ceph_mdsc_init()
4624 kfree(mdsc->mdsmap); in ceph_mdsc_init()
4626 kfree(mdsc); in ceph_mdsc_init()
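ceph_mdsc_init() above follows the usual allocate-then-unwind shape: allocate the client, allocate the mdsmap, run further init steps, and on any failure free what was already allocated in reverse order (the kfree(mdsc->mdsmap) then kfree(mdsc) lines at the end). A compact userspace sketch of that goto-based cleanup follows; the struct names and labels are invented for illustration.

#include <stdlib.h>

struct mdsmap { int epoch; };
struct client { struct mdsmap *map; int *metric; };

static struct client *client_init(void)
{
    struct client *c = calloc(1, sizeof(*c));
    if (!c)
        return NULL;

    c->map = calloc(1, sizeof(*c->map));       /* like the mdsc->mdsmap allocation */
    if (!c->map)
        goto err_free_client;

    c->metric = calloc(1, sizeof(*c->metric)); /* like a later init step that can fail */
    if (!c->metric)
        goto err_free_map;

    return c;

err_free_map:                                  /* unwind in reverse allocation order */
    free(c->map);
err_free_client:
    free(c);
    return NULL;
}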
4634 static void wait_requests(struct ceph_mds_client *mdsc) in wait_requests() argument
4636 struct ceph_options *opts = mdsc->fsc->client->options; in wait_requests()
4639 mutex_lock(&mdsc->mutex); in wait_requests()
4640 if (__get_oldest_req(mdsc)) { in wait_requests()
4641 mutex_unlock(&mdsc->mutex); in wait_requests()
4644 wait_for_completion_timeout(&mdsc->safe_umount_waiters, in wait_requests()
4648 mutex_lock(&mdsc->mutex); in wait_requests()
4649 while ((req = __get_oldest_req(mdsc))) { in wait_requests()
4653 __unregister_request(mdsc, req); in wait_requests()
4656 mutex_unlock(&mdsc->mutex); in wait_requests()
4688 void ceph_mdsc_pre_umount(struct ceph_mds_client *mdsc) in ceph_mdsc_pre_umount() argument
4691 mdsc->stopping = 1; in ceph_mdsc_pre_umount()
4693 ceph_mdsc_iterate_sessions(mdsc, send_flush_mdlog, true); in ceph_mdsc_pre_umount()
4694 ceph_mdsc_iterate_sessions(mdsc, lock_unlock_session, false); in ceph_mdsc_pre_umount()
4695 ceph_flush_dirty_caps(mdsc); in ceph_mdsc_pre_umount()
4696 wait_requests(mdsc); in ceph_mdsc_pre_umount()
4704 ceph_cleanup_quotarealms_inodes(mdsc); in ceph_mdsc_pre_umount()
4710 static void wait_unsafe_requests(struct ceph_mds_client *mdsc, u64 want_tid) in wait_unsafe_requests() argument
4715 mutex_lock(&mdsc->mutex); in wait_unsafe_requests()
4718 req = __get_oldest_req(mdsc); in wait_unsafe_requests()
4732 mutex_unlock(&mdsc->mutex); in wait_unsafe_requests()
4736 mutex_lock(&mdsc->mutex); in wait_unsafe_requests()
4749 mutex_unlock(&mdsc->mutex); in wait_unsafe_requests()
4753 void ceph_mdsc_sync(struct ceph_mds_client *mdsc) in ceph_mdsc_sync() argument
4757 if (READ_ONCE(mdsc->fsc->mount_state) >= CEPH_MOUNT_SHUTDOWN) in ceph_mdsc_sync()
4761 mutex_lock(&mdsc->mutex); in ceph_mdsc_sync()
4762 want_tid = mdsc->last_tid; in ceph_mdsc_sync()
4763 mutex_unlock(&mdsc->mutex); in ceph_mdsc_sync()
4765 ceph_flush_dirty_caps(mdsc); in ceph_mdsc_sync()
4766 spin_lock(&mdsc->cap_dirty_lock); in ceph_mdsc_sync()
4767 want_flush = mdsc->last_cap_flush_tid; in ceph_mdsc_sync()
4768 if (!list_empty(&mdsc->cap_flush_list)) { in ceph_mdsc_sync()
4770 list_last_entry(&mdsc->cap_flush_list, in ceph_mdsc_sync()
4774 spin_unlock(&mdsc->cap_dirty_lock); in ceph_mdsc_sync()
4779 wait_unsafe_requests(mdsc, want_tid); in ceph_mdsc_sync()
4780 wait_caps_flush(mdsc, want_flush); in ceph_mdsc_sync()
4786 static bool done_closing_sessions(struct ceph_mds_client *mdsc, int skipped) in done_closing_sessions() argument
4788 if (READ_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_SHUTDOWN) in done_closing_sessions()
4790 return atomic_read(&mdsc->num_sessions) <= skipped; in done_closing_sessions()
4796 void ceph_mdsc_close_sessions(struct ceph_mds_client *mdsc) in ceph_mdsc_close_sessions() argument
4798 struct ceph_options *opts = mdsc->fsc->client->options; in ceph_mdsc_close_sessions()
4806 mutex_lock(&mdsc->mutex); in ceph_mdsc_close_sessions()
4807 for (i = 0; i < mdsc->max_sessions; i++) { in ceph_mdsc_close_sessions()
4808 session = __ceph_lookup_mds_session(mdsc, i); in ceph_mdsc_close_sessions()
4811 mutex_unlock(&mdsc->mutex); in ceph_mdsc_close_sessions()
4813 if (__close_session(mdsc, session) <= 0) in ceph_mdsc_close_sessions()
4817 mutex_lock(&mdsc->mutex); in ceph_mdsc_close_sessions()
4819 mutex_unlock(&mdsc->mutex); in ceph_mdsc_close_sessions()
4822 wait_event_timeout(mdsc->session_close_wq, in ceph_mdsc_close_sessions()
4823 done_closing_sessions(mdsc, skipped), in ceph_mdsc_close_sessions()
4827 mutex_lock(&mdsc->mutex); in ceph_mdsc_close_sessions()
4828 for (i = 0; i < mdsc->max_sessions; i++) { in ceph_mdsc_close_sessions()
4829 if (mdsc->sessions[i]) { in ceph_mdsc_close_sessions()
4830 session = ceph_get_mds_session(mdsc->sessions[i]); in ceph_mdsc_close_sessions()
4831 __unregister_session(mdsc, session); in ceph_mdsc_close_sessions()
4832 mutex_unlock(&mdsc->mutex); in ceph_mdsc_close_sessions()
4837 mutex_lock(&mdsc->mutex); in ceph_mdsc_close_sessions()
4840 WARN_ON(!list_empty(&mdsc->cap_delay_list)); in ceph_mdsc_close_sessions()
4841 mutex_unlock(&mdsc->mutex); in ceph_mdsc_close_sessions()
4843 ceph_cleanup_snapid_map(mdsc); in ceph_mdsc_close_sessions()
4844 ceph_cleanup_empty_realms(mdsc); in ceph_mdsc_close_sessions()
4846 cancel_work_sync(&mdsc->cap_reclaim_work); in ceph_mdsc_close_sessions()
4847 cancel_delayed_work_sync(&mdsc->delayed_work); /* cancel timer */ in ceph_mdsc_close_sessions()
4852 void ceph_mdsc_force_umount(struct ceph_mds_client *mdsc) in ceph_mdsc_force_umount() argument
4859 mutex_lock(&mdsc->mutex); in ceph_mdsc_force_umount()
4860 for (mds = 0; mds < mdsc->max_sessions; mds++) { in ceph_mdsc_force_umount()
4861 session = __ceph_lookup_mds_session(mdsc, mds); in ceph_mdsc_force_umount()
4866 __unregister_session(mdsc, session); in ceph_mdsc_force_umount()
4867 __wake_requests(mdsc, &session->s_waiting); in ceph_mdsc_force_umount()
4868 mutex_unlock(&mdsc->mutex); in ceph_mdsc_force_umount()
4871 __close_session(mdsc, session); in ceph_mdsc_force_umount()
4873 cleanup_session_requests(mdsc, session); in ceph_mdsc_force_umount()
4879 mutex_lock(&mdsc->mutex); in ceph_mdsc_force_umount()
4880 kick_requests(mdsc, mds); in ceph_mdsc_force_umount()
4882 __wake_requests(mdsc, &mdsc->waiting_for_map); in ceph_mdsc_force_umount()
4883 mutex_unlock(&mdsc->mutex); in ceph_mdsc_force_umount()
4886 static void ceph_mdsc_stop(struct ceph_mds_client *mdsc) in ceph_mdsc_stop() argument
4897 flush_delayed_work(&mdsc->delayed_work); in ceph_mdsc_stop()
4899 if (mdsc->mdsmap) in ceph_mdsc_stop()
4900 ceph_mdsmap_destroy(mdsc->mdsmap); in ceph_mdsc_stop()
4901 kfree(mdsc->sessions); in ceph_mdsc_stop()
4902 ceph_caps_finalize(mdsc); in ceph_mdsc_stop()
4903 ceph_pool_perm_destroy(mdsc); in ceph_mdsc_stop()
4908 struct ceph_mds_client *mdsc = fsc->mdsc; in ceph_mdsc_destroy() local
4909 dout("mdsc_destroy %p\n", mdsc); in ceph_mdsc_destroy()
4911 if (!mdsc) in ceph_mdsc_destroy()
4917 ceph_mdsc_stop(mdsc); in ceph_mdsc_destroy()
4919 ceph_metric_destroy(&mdsc->metric); in ceph_mdsc_destroy()
4921 fsc->mdsc = NULL; in ceph_mdsc_destroy()
4922 kfree(mdsc); in ceph_mdsc_destroy()
4923 dout("mdsc_destroy %p done\n", mdsc); in ceph_mdsc_destroy()
4926 void ceph_mdsc_handle_fsmap(struct ceph_mds_client *mdsc, struct ceph_msg *msg) in ceph_mdsc_handle_fsmap() argument
4928 struct ceph_fs_client *fsc = mdsc->fsc; in ceph_mdsc_handle_fsmap()
4986 ceph_umount_begin(mdsc->fsc->sb); in ceph_mdsc_handle_fsmap()
4988 mutex_lock(&mdsc->mutex); in ceph_mdsc_handle_fsmap()
4989 mdsc->mdsmap_err = err; in ceph_mdsc_handle_fsmap()
4990 __wake_requests(mdsc, &mdsc->waiting_for_map); in ceph_mdsc_handle_fsmap()
4991 mutex_unlock(&mdsc->mutex); in ceph_mdsc_handle_fsmap()
4997 void ceph_mdsc_handle_mdsmap(struct ceph_mds_client *mdsc, struct ceph_msg *msg) in ceph_mdsc_handle_mdsmap() argument
5009 if (ceph_check_fsid(mdsc->fsc->client, &fsid) < 0) in ceph_mdsc_handle_mdsmap()
5016 mutex_lock(&mdsc->mutex); in ceph_mdsc_handle_mdsmap()
5017 if (mdsc->mdsmap && epoch <= mdsc->mdsmap->m_epoch) { in ceph_mdsc_handle_mdsmap()
5019 epoch, mdsc->mdsmap->m_epoch); in ceph_mdsc_handle_mdsmap()
5020 mutex_unlock(&mdsc->mutex); in ceph_mdsc_handle_mdsmap()
5024 newmap = ceph_mdsmap_decode(&p, end, ceph_msgr2(mdsc->fsc->client)); in ceph_mdsc_handle_mdsmap()
5031 if (mdsc->mdsmap) { in ceph_mdsc_handle_mdsmap()
5032 oldmap = mdsc->mdsmap; in ceph_mdsc_handle_mdsmap()
5033 mdsc->mdsmap = newmap; in ceph_mdsc_handle_mdsmap()
5034 check_new_map(mdsc, newmap, oldmap); in ceph_mdsc_handle_mdsmap()
5037 mdsc->mdsmap = newmap; /* first mds map */ in ceph_mdsc_handle_mdsmap()
5039 mdsc->fsc->max_file_size = min((loff_t)mdsc->mdsmap->m_max_file_size, in ceph_mdsc_handle_mdsmap()
5042 __wake_requests(mdsc, &mdsc->waiting_for_map); in ceph_mdsc_handle_mdsmap()
5043 ceph_monc_got_map(&mdsc->fsc->client->monc, CEPH_SUB_MDSMAP, in ceph_mdsc_handle_mdsmap()
5044 mdsc->mdsmap->m_epoch); in ceph_mdsc_handle_mdsmap()
5046 mutex_unlock(&mdsc->mutex); in ceph_mdsc_handle_mdsmap()
5047 schedule_delayed(mdsc, 0); in ceph_mdsc_handle_mdsmap()
5051 mutex_unlock(&mdsc->mutex); in ceph_mdsc_handle_mdsmap()
5054 ceph_umount_begin(mdsc->fsc->sb); in ceph_mdsc_handle_mdsmap()
5081 struct ceph_mds_client *mdsc = s->s_mdsc; in mds_peer_reset() local
5084 send_mds_reconnect(mdsc, s); in mds_peer_reset()
5090 struct ceph_mds_client *mdsc = s->s_mdsc; in mds_dispatch() local
5093 mutex_lock(&mdsc->mutex); in mds_dispatch()
5094 if (__verify_registered_session(mdsc, s) < 0) { in mds_dispatch()
5095 mutex_unlock(&mdsc->mutex); in mds_dispatch()
5098 mutex_unlock(&mdsc->mutex); in mds_dispatch()
5102 ceph_mdsc_handle_mdsmap(mdsc, msg); in mds_dispatch()
5105 ceph_mdsc_handle_fsmap(mdsc, msg); in mds_dispatch()
5114 handle_forward(mdsc, s, msg); in mds_dispatch()
5120 ceph_handle_snap(mdsc, s, msg); in mds_dispatch()
5123 handle_lease(mdsc, s, msg); in mds_dispatch()
5126 ceph_handle_quota(mdsc, s, msg); in mds_dispatch()
5149 struct ceph_mds_client *mdsc = s->s_mdsc; in mds_get_authorizer() local
5150 struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth; in mds_get_authorizer()
5166 struct ceph_mds_client *mdsc = s->s_mdsc; in mds_add_authorizer_challenge() local
5167 struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth; in mds_add_authorizer_challenge()
5176 struct ceph_mds_client *mdsc = s->s_mdsc; in mds_verify_authorizer_reply() local
5177 struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth; in mds_verify_authorizer_reply()
5188 struct ceph_mds_client *mdsc = s->s_mdsc; in mds_invalidate_authorizer() local
5189 struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth; in mds_invalidate_authorizer()
5193 return ceph_monc_validate_auth(&mdsc->fsc->client->monc); in mds_invalidate_authorizer()