// SPDX-License-Identifier: GPL-2.0-only
/******************************************************************************
*******************************************************************************
**
**  Copyright (C) 2005-2007 Red Hat, Inc.  All rights reserved.
**
**
*******************************************************************************
******************************************************************************/

#include "dlm_internal.h"
#include "member.h"
#include "lock.h"
#include "dir.h"
#include "config.h"
#include "requestqueue.h"

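/*
 * Each saved message is kept in an rq_entry.  "request" is the start of a
 * full copy of the incoming dlm_message, so entries are allocated with extra
 * space for any payload that follows the fixed message header.
 */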
struct rq_entry {
	struct list_head list;
	uint32_t recover_seq;
	int nodeid;
	struct dlm_message request;
};

/*
 * Requests received while the lockspace is in recovery get added to the
 * request queue and processed when recovery is complete.  This happens when
 * the lockspace is suspended on some nodes before it is on others, or the
 * lockspace is enabled on some while still suspended on others.
 */

void dlm_add_requestqueue(struct dlm_ls *ls, int nodeid, struct dlm_message *ms)
{
	struct rq_entry *e;
	int length = ms->m_header.h_length - sizeof(struct dlm_message);

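	/* allocate room for the fixed message plus any trailing payload */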
	e = kmalloc(sizeof(struct rq_entry) + length, GFP_NOFS);
	if (!e) {
		log_print("dlm_add_requestqueue: out of memory len %d", length);
		return;
	}

	e->recover_seq = ls->ls_recover_seq & 0xFFFFFFFF;
	e->nodeid = nodeid;
	memcpy(&e->request, ms, ms->m_header.h_length);

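	/* append so saved messages are replayed in arrival order */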
	mutex_lock(&ls->ls_requestqueue_mutex);
	list_add_tail(&e->list, &ls->ls_requestqueue);
	mutex_unlock(&ls->ls_requestqueue_mutex);
}

/*
 * Called by dlm_recoverd to process normal messages saved while recovery was
 * happening.  Normal locking has been enabled before this is called.  dlm_recv,
 * upon receiving a message, will wait for all saved messages to be drained
 * here before processing the message it got.  If a new dlm_ls_stop() arrives
 * while we're processing these saved messages, it may block trying to suspend
 * dlm_recv if dlm_recv is waiting for us in dlm_wait_requestqueue.  In that
 * case, we don't abort since locking_stopped is still 0.  If dlm_recv is not
 * waiting for us, then this processing may be aborted due to locking_stopped.
 */

int dlm_process_requestqueue(struct dlm_ls *ls)
{
	struct rq_entry *e;
	struct dlm_message *ms;
	int error = 0;

	mutex_lock(&ls->ls_requestqueue_mutex);

	for (;;) {
		if (list_empty(&ls->ls_requestqueue)) {
			mutex_unlock(&ls->ls_requestqueue_mutex);
			error = 0;
			break;
		}
		e = list_entry(ls->ls_requestqueue.next, struct rq_entry, list);
		mutex_unlock(&ls->ls_requestqueue_mutex);

		ms = &e->request;

		log_limit(ls, "dlm_process_requestqueue msg %d from %d "
			  "lkid %x remid %x result %d seq %u",
			  ms->m_type, ms->m_header.h_nodeid,
			  ms->m_lkid, ms->m_remid, ms->m_result,
			  e->recover_seq);

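		/* the requestqueue mutex is not held while the saved
		   message is delivered */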
		dlm_receive_message_saved(ls, &e->request, e->recover_seq);

		mutex_lock(&ls->ls_requestqueue_mutex);
		list_del(&e->list);
		kfree(e);

		if (dlm_locking_stopped(ls)) {
			log_debug(ls, "process_requestqueue abort running");
			mutex_unlock(&ls->ls_requestqueue_mutex);
			error = -EINTR;
			break;
		}
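		/* yield the cpu between messages */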
		schedule();
	}

	return error;
}

/*
 * After recovery is done, locking is resumed and dlm_recoverd takes all the
 * saved requests and processes them as they would have been by dlm_recv.  At
 * the same time, dlm_recv will start receiving new requests from remote nodes.
 * We want to delay dlm_recv processing new requests until dlm_recoverd has
 * finished processing the old saved requests.  We don't check for locking
 * stopped here because dlm_ls_stop won't stop locking until it's suspended us
 * (dlm_recv).
 */

void dlm_wait_requestqueue(struct dlm_ls *ls)
{
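	/* spin, yielding the cpu, until the queue has been drained */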
	for (;;) {
		mutex_lock(&ls->ls_requestqueue_mutex);
		if (list_empty(&ls->ls_requestqueue))
			break;
		mutex_unlock(&ls->ls_requestqueue_mutex);
		schedule();
	}
	mutex_unlock(&ls->ls_requestqueue_mutex);
}

static int purge_request(struct dlm_ls *ls, struct dlm_message *ms, int nodeid)
{
	uint32_t type = ms->m_type;

	/* the ls is being cleaned up and freed by release_lockspace */
	if (!ls->ls_count)
		return 1;

	if (dlm_is_removed(ls, nodeid))
		return 1;

	/* directory operations are always purged because the directory is
	   always rebuilt during recovery and the lookups resent */

	if (type == DLM_MSG_REMOVE ||
	    type == DLM_MSG_LOOKUP ||
	    type == DLM_MSG_LOOKUP_REPLY)
		return 1;

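	/* with no resource directory, all remaining saved messages are
	   purged as well */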
	if (!dlm_no_directory(ls))
		return 0;

	return 1;
}

void dlm_purge_requestqueue(struct dlm_ls *ls)
{
	struct dlm_message *ms;
	struct rq_entry *e, *safe;

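	/* drop saved messages that recovery has made unnecessary */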
	mutex_lock(&ls->ls_requestqueue_mutex);
	list_for_each_entry_safe(e, safe, &ls->ls_requestqueue, list) {
		ms = &e->request;

		if (purge_request(ls, ms, e->nodeid)) {
			list_del(&e->list);
			kfree(e);
		}
	}
	mutex_unlock(&ls->ls_requestqueue_mutex);
}