// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 */

#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/gfs2_ondisk.h>
#include <linux/bio.h>
#include <linux/posix_acl.h>
#include <linux/security.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "recovery.h"
#include "rgrp.h"
#include "util.h"
#include "trans.h"
#include "dir.h"
#include "lops.h"

struct workqueue_struct *gfs2_freeze_wq;

extern struct workqueue_struct *gfs2_control_wq;

static void gfs2_ail_error(struct gfs2_glock *gl, const struct buffer_head *bh)
{
        struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

        fs_err(sdp,
               "AIL buffer %p: blocknr %llu state 0x%08lx mapping %p page "
               "state 0x%lx\n",
               bh, (unsigned long long)bh->b_blocknr, bh->b_state,
               bh->b_page->mapping, bh->b_page->flags);
        fs_err(sdp, "AIL glock %u:%llu mapping %p\n",
               gl->gl_name.ln_type, gl->gl_name.ln_number,
               gfs2_glock2aspace(gl));
        gfs2_lm(sdp, "AIL error\n");
        gfs2_withdraw_delayed(sdp);
}

/**
 * __gfs2_ail_flush - remove all buffers for a given lock from the AIL
 * @gl: the glock
 * @fsync: set when called from fsync (not all buffers will be clean)
 * @nr_revokes: Number of buffers to revoke
 *
 * None of the buffers should be dirty, locked, or pinned.
 */

static void __gfs2_ail_flush(struct gfs2_glock *gl, bool fsync,
                             unsigned int nr_revokes)
{
        struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
        struct list_head *head = &gl->gl_ail_list;
        struct gfs2_bufdata *bd, *tmp;
        struct buffer_head *bh;
        const unsigned long b_state = (1UL << BH_Dirty)|(1UL << BH_Pinned)|(1UL << BH_Lock);

        gfs2_log_lock(sdp);
        spin_lock(&sdp->sd_ail_lock);
        list_for_each_entry_safe_reverse(bd, tmp, head, bd_ail_gl_list) {
                if (nr_revokes == 0)
                        break;
                bh = bd->bd_bh;
                if (bh->b_state & b_state) {
                        if (fsync)
                                continue;
                        gfs2_ail_error(gl, bh);
                }
                gfs2_trans_add_revoke(sdp, bd);
                nr_revokes--;
        }
        GLOCK_BUG_ON(gl, !fsync && atomic_read(&gl->gl_ail_count));
        spin_unlock(&sdp->sd_ail_lock);
        gfs2_log_unlock(sdp);
}

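/**
 * gfs2_ail_empty_gl - revoke and flush all AIL buffers for a given lock
 * @gl: the glock
 *
 * Queues one revoke per buffer on the glock's AIL list in a single
 * on-stack transaction, then flushes the log.  If the AIL list is empty,
 * the log is still flushed (or waited for) in case revokes are queued or
 * in flight, so that the caller knows all revokes have hit the media.
 *
 * Returns: 0
 */
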
static int gfs2_ail_empty_gl(struct gfs2_glock *gl)
{
        struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
        struct gfs2_trans tr;
        unsigned int revokes;
        int ret;

        revokes = atomic_read(&gl->gl_ail_count);

        if (!revokes) {
                bool have_revokes;
                bool log_in_flight;

                /*
                 * We have nothing on the AIL, but there could be revokes on
                 * the sdp revoke queue, in which case, we still want to flush
                 * the log and wait for it to finish.
                 *
                 * If the sdp revoke list is empty too, we might still have an
                 * I/O outstanding for writing revokes, so we should wait for
                 * it before returning.
                 *
                 * If none of these conditions are true, our revokes are all
                 * flushed and we can return.
                 */
                gfs2_log_lock(sdp);
                have_revokes = !list_empty(&sdp->sd_log_revokes);
                log_in_flight = atomic_read(&sdp->sd_log_in_flight);
                gfs2_log_unlock(sdp);
                if (have_revokes)
                        goto flush;
                if (log_in_flight)
                        log_flush_wait(sdp);
                return 0;
        }

        memset(&tr, 0, sizeof(tr));
        set_bit(TR_ONSTACK, &tr.tr_flags);
        ret = __gfs2_trans_begin(&tr, sdp, 0, revokes, _RET_IP_);
        if (ret)
                goto flush;
        __gfs2_ail_flush(gl, false, revokes);
        gfs2_trans_end(sdp);

flush:
        gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
                       GFS2_LFC_AIL_EMPTY_GL);
        return 0;
}

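/**
 * gfs2_ail_flush - revoke all AIL buffers for a given lock
 * @gl: the glock
 * @fsync: set when called from fsync (dirty buffers are then expected
 *         and are skipped rather than reported as an error)
 */
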
void gfs2_ail_flush(struct gfs2_glock *gl, bool fsync)
{
        struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
        unsigned int revokes = atomic_read(&gl->gl_ail_count);
        int ret;

        if (!revokes)
                return;

        ret = gfs2_trans_begin(sdp, 0, revokes);
        if (ret)
                return;
        __gfs2_ail_flush(gl, fsync, revokes);
        gfs2_trans_end(sdp);
        gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
                       GFS2_LFC_AIL_FLUSH);
}

/**
 * gfs2_rgrp_metasync - sync out the metadata of a resource group
 * @gl: the glock protecting the resource group
 *
 * Returns: errno
 */

static int gfs2_rgrp_metasync(struct gfs2_glock *gl)
{
        struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
        struct address_space *metamapping = &sdp->sd_aspace;
        struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl);
        const unsigned bsize = sdp->sd_sb.sb_bsize;
        loff_t start = (rgd->rd_addr * bsize) & PAGE_MASK;
        loff_t end = PAGE_ALIGN((rgd->rd_addr + rgd->rd_length) * bsize) - 1;
        int error;

        filemap_fdatawrite_range(metamapping, start, end);
        error = filemap_fdatawait_range(metamapping, start, end);
        WARN_ON_ONCE(error && !gfs2_withdrawn(sdp));
        mapping_set_error(metamapping, error);
        if (error)
                gfs2_io_error(sdp);
        return error;
}

/**
 * rgrp_go_sync - sync out the metadata for this glock
 * @gl: the glock
 *
 * Called when demoting or unlocking an EX glock.  We must flush
 * to disk all dirty buffers/pages relating to this glock, and must not
 * return to the caller to demote/unlock the glock until the I/O is
 * complete.
 *
 * Returns: errno
 */

static int rgrp_go_sync(struct gfs2_glock *gl)
{
        struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
        struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl);
        int error;

        if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags))
                return 0;
        GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_EXCLUSIVE);

        gfs2_log_flush(sdp, gl, GFS2_LOG_HEAD_FLUSH_NORMAL |
                       GFS2_LFC_RGRP_GO_SYNC);
        error = gfs2_rgrp_metasync(gl);
        if (!error)
                error = gfs2_ail_empty_gl(gl);
        gfs2_free_clones(rgd);
        return error;
}

/**
 * rgrp_go_inval - invalidate the metadata for this glock
 * @gl: the glock
 * @flags: DIO_* invalidation flags
 *
 * We never use LM_ST_DEFERRED with resource groups, so we should always
 * see the metadata flag set here.
 */

static void rgrp_go_inval(struct gfs2_glock *gl, int flags)
{
        struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
        struct address_space *mapping = &sdp->sd_aspace;
        struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl);
        const unsigned bsize = sdp->sd_sb.sb_bsize;
        loff_t start = (rgd->rd_addr * bsize) & PAGE_MASK;
        loff_t end = PAGE_ALIGN((rgd->rd_addr + rgd->rd_length) * bsize) - 1;

        gfs2_rgrp_brelse(rgd);
        WARN_ON_ONCE(!(flags & DIO_METADATA));
        truncate_inode_pages_range(mapping, start, end);
        set_bit(GLF_INSTANTIATE_NEEDED, &gl->gl_flags);
}

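/**
 * gfs2_rgrp_go_dump - print information about a resource group glock
 * @seq: The iterator
 * @gl: The glock
 * @fs_id_buf: file system id (may be empty)
 */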
static void gfs2_rgrp_go_dump(struct seq_file *seq, struct gfs2_glock *gl,
                              const char *fs_id_buf)
{
        struct gfs2_rgrpd *rgd = gl->gl_object;

        if (rgd)
                gfs2_rgrp_dump(seq, rgd, fs_id_buf);
}

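/*
 * gfs2_glock2inode - safely dereference a glock's inode object
 *
 * Sets GIF_GLOP_PENDING on the inode (if any) under gl_lockref.lock so
 * that the inode cannot be torn down while a glock operation still
 * references it; callers pair this with gfs2_clear_glop_pending().
 */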
static struct gfs2_inode *gfs2_glock2inode(struct gfs2_glock *gl)
{
        struct gfs2_inode *ip;

        spin_lock(&gl->gl_lockref.lock);
        ip = gl->gl_object;
        if (ip)
                set_bit(GIF_GLOP_PENDING, &ip->i_flags);
        spin_unlock(&gl->gl_lockref.lock);
        return ip;
}

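/* Safely dereference a glock's resource group object under gl_lockref.lock. */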
struct gfs2_rgrpd *gfs2_glock2rgrp(struct gfs2_glock *gl)
{
        struct gfs2_rgrpd *rgd;

        spin_lock(&gl->gl_lockref.lock);
        rgd = gl->gl_object;
        spin_unlock(&gl->gl_lockref.lock);

        return rgd;
}

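/*
 * Clear GIF_GLOP_PENDING (set by gfs2_glock2inode()) and wake up any
 * waiters on that bit.
 */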
static void gfs2_clear_glop_pending(struct gfs2_inode *ip)
{
        if (!ip)
                return;

        clear_bit_unlock(GIF_GLOP_PENDING, &ip->i_flags);
        wake_up_bit(&ip->i_flags, GIF_GLOP_PENDING);
}

/**
 * gfs2_inode_metasync - sync out the metadata of an inode
 * @gl: the glock protecting the inode
 *
 * Returns: errno
 */
int gfs2_inode_metasync(struct gfs2_glock *gl)
{
        struct address_space *metamapping = gfs2_glock2aspace(gl);
        int error;

        filemap_fdatawrite(metamapping);
        error = filemap_fdatawait(metamapping);
        if (error)
                gfs2_io_error(gl->gl_name.ln_sbd);
        return error;
}

/**
 * inode_go_sync - Sync the dirty metadata of an inode
 * @gl: the glock protecting the inode
 *
 * Returns: errno
 */

static int inode_go_sync(struct gfs2_glock *gl)
{
        struct gfs2_inode *ip = gfs2_glock2inode(gl);
        int isreg = ip && S_ISREG(ip->i_inode.i_mode);
        struct address_space *metamapping = gfs2_glock2aspace(gl);
        int error = 0, ret;

        if (isreg) {
                if (test_and_clear_bit(GIF_SW_PAGED, &ip->i_flags))
                        unmap_shared_mapping_range(ip->i_inode.i_mapping, 0, 0);
                inode_dio_wait(&ip->i_inode);
        }
        if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags))
                goto out;

        GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_EXCLUSIVE);

        gfs2_log_flush(gl->gl_name.ln_sbd, gl, GFS2_LOG_HEAD_FLUSH_NORMAL |
                       GFS2_LFC_INODE_GO_SYNC);
        filemap_fdatawrite(metamapping);
        if (isreg) {
                struct address_space *mapping = ip->i_inode.i_mapping;
                filemap_fdatawrite(mapping);
                error = filemap_fdatawait(mapping);
                mapping_set_error(mapping, error);
        }
        ret = gfs2_inode_metasync(gl);
        if (!error)
                error = ret;
        gfs2_ail_empty_gl(gl);
        /*
         * Writeback of the data mapping may cause the dirty flag to be set
         * so we have to clear it again here.
         */
        smp_mb__before_atomic();
        clear_bit(GLF_DIRTY, &gl->gl_flags);

out:
        gfs2_clear_glop_pending(ip);
        return error;
}

/**
 * inode_go_inval - prepare an inode glock to be released
 * @gl: the glock
 * @flags: DIO_* invalidation flags
 *
 * Normally we invalidate everything, but if we are moving into
 * LM_ST_DEFERRED from LM_ST_SHARED or LM_ST_EXCLUSIVE then we
 * can keep hold of the metadata, since it won't have changed.
 */

static void inode_go_inval(struct gfs2_glock *gl, int flags)
{
        struct gfs2_inode *ip = gfs2_glock2inode(gl);

        if (flags & DIO_METADATA) {
                struct address_space *mapping = gfs2_glock2aspace(gl);
                truncate_inode_pages(mapping, 0);
                if (ip) {
                        set_bit(GLF_INSTANTIATE_NEEDED, &gl->gl_flags);
                        forget_all_cached_acls(&ip->i_inode);
                        security_inode_invalidate_secctx(&ip->i_inode);
                        gfs2_dir_hash_inval(ip);
                }
        }

        if (ip == GFS2_I(gl->gl_name.ln_sbd->sd_rindex)) {
                gfs2_log_flush(gl->gl_name.ln_sbd, NULL,
                               GFS2_LOG_HEAD_FLUSH_NORMAL |
                               GFS2_LFC_INODE_GO_INVAL);
                gl->gl_name.ln_sbd->sd_rindex_uptodate = 0;
        }
        if (ip && S_ISREG(ip->i_inode.i_mode))
                truncate_inode_pages(ip->i_inode.i_mapping, 0);

        gfs2_clear_glop_pending(ip);
}

/**
 * inode_go_demote_ok - Check to see if it's ok to unlock an inode glock
 * @gl: the glock
 *
 * Returns: 1 if it's ok
 */

static int inode_go_demote_ok(const struct gfs2_glock *gl)
{
        struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

        if (sdp->sd_jindex == gl->gl_object || sdp->sd_rindex == gl->gl_object)
                return 0;

        return 1;
}

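/**
 * gfs2_dinode_in - copy an on-disk dinode into the incore inode
 * @ip: The GFS2 inode
 * @buf: The buffer containing the on-disk dinode
 *
 * Returns: 0 on success, -EIO if the dinode fails a consistency check
 *          (wrong block address, inode type, height, or depth)
 */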
static int gfs2_dinode_in(struct gfs2_inode *ip, const void *buf)
{
        const struct gfs2_dinode *str = buf;
        struct timespec64 atime;
        u16 height, depth;
        umode_t mode = be32_to_cpu(str->di_mode);
        bool is_new = ip->i_inode.i_state & I_NEW;

        if (unlikely(ip->i_no_addr != be64_to_cpu(str->di_num.no_addr)))
                goto corrupt;
        if (unlikely(!is_new && inode_wrong_type(&ip->i_inode, mode)))
                goto corrupt;
        ip->i_no_formal_ino = be64_to_cpu(str->di_num.no_formal_ino);
        ip->i_inode.i_mode = mode;
        if (is_new) {
                ip->i_inode.i_rdev = 0;
                switch (mode & S_IFMT) {
                case S_IFBLK:
                case S_IFCHR:
                        ip->i_inode.i_rdev = MKDEV(be32_to_cpu(str->di_major),
                                                   be32_to_cpu(str->di_minor));
                        break;
                }
        }

        i_uid_write(&ip->i_inode, be32_to_cpu(str->di_uid));
        i_gid_write(&ip->i_inode, be32_to_cpu(str->di_gid));
        set_nlink(&ip->i_inode, be32_to_cpu(str->di_nlink));
        i_size_write(&ip->i_inode, be64_to_cpu(str->di_size));
        gfs2_set_inode_blocks(&ip->i_inode, be64_to_cpu(str->di_blocks));
        atime.tv_sec = be64_to_cpu(str->di_atime);
        atime.tv_nsec = be32_to_cpu(str->di_atime_nsec);
        if (timespec64_compare(&ip->i_inode.i_atime, &atime) < 0)
                ip->i_inode.i_atime = atime;
        ip->i_inode.i_mtime.tv_sec = be64_to_cpu(str->di_mtime);
        ip->i_inode.i_mtime.tv_nsec = be32_to_cpu(str->di_mtime_nsec);
        ip->i_inode.i_ctime.tv_sec = be64_to_cpu(str->di_ctime);
        ip->i_inode.i_ctime.tv_nsec = be32_to_cpu(str->di_ctime_nsec);

        ip->i_goal = be64_to_cpu(str->di_goal_meta);
        ip->i_generation = be64_to_cpu(str->di_generation);

        ip->i_diskflags = be32_to_cpu(str->di_flags);
        ip->i_eattr = be64_to_cpu(str->di_eattr);
        /* i_diskflags and i_eattr must be set before gfs2_set_inode_flags() */
        gfs2_set_inode_flags(&ip->i_inode);
        height = be16_to_cpu(str->di_height);
        if (unlikely(height > GFS2_MAX_META_HEIGHT))
                goto corrupt;
        ip->i_height = (u8)height;

        depth = be16_to_cpu(str->di_depth);
        if (unlikely(depth > GFS2_DIR_MAX_DEPTH))
                goto corrupt;
        ip->i_depth = (u8)depth;
        ip->i_entries = be32_to_cpu(str->di_entries);

        if (S_ISREG(ip->i_inode.i_mode))
                gfs2_set_aops(&ip->i_inode);

        return 0;
corrupt:
        gfs2_consist_inode(ip);
        return -EIO;
}

/**
 * gfs2_inode_refresh - Refresh the incore copy of the dinode
 * @ip: The GFS2 inode
 *
 * Returns: errno
 */

int gfs2_inode_refresh(struct gfs2_inode *ip)
{
        struct buffer_head *dibh;
        int error;

        error = gfs2_meta_inode_buffer(ip, &dibh);
        if (error)
                return error;

        error = gfs2_dinode_in(ip, dibh->b_data);
        brelse(dibh);
        return error;
}

/**
 * inode_go_instantiate - read in an inode if necessary
 * @gh: The glock holder
 *
 * Returns: errno
 */

static int inode_go_instantiate(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
        struct gfs2_inode *ip = gl->gl_object;
        int error = 0;

        if (!ip) /* no inode to populate - read it in later */
                goto out;

        error = gfs2_inode_refresh(ip);
        if (error)
                goto out;

        if (gh->gh_state != LM_ST_DEFERRED)
                inode_dio_wait(&ip->i_inode);

        if ((ip->i_diskflags & GFS2_DIF_TRUNC_IN_PROG) &&
            (gl->gl_state == LM_ST_EXCLUSIVE) &&
            (gh->gh_state == LM_ST_EXCLUSIVE)) {
                spin_lock(&sdp->sd_trunc_lock);
                if (list_empty(&ip->i_trunc_list))
                        list_add(&ip->i_trunc_list, &sdp->sd_trunc_list);
                spin_unlock(&sdp->sd_trunc_lock);
                wake_up(&sdp->sd_quota_wait);
                error = 1;
        }

out:
        return error;
}

/**
 * inode_go_dump - print information about an inode
 * @seq: The iterator
 * @gl: The glock
 * @fs_id_buf: file system id (may be empty)
 */

static void inode_go_dump(struct seq_file *seq, struct gfs2_glock *gl,
                          const char *fs_id_buf)
{
        struct gfs2_inode *ip = gl->gl_object;
        struct inode *inode = &ip->i_inode;
        unsigned long nrpages;

        if (ip == NULL)
                return;

        xa_lock_irq(&inode->i_data.i_pages);
        nrpages = inode->i_data.nrpages;
        xa_unlock_irq(&inode->i_data.i_pages);

        gfs2_print_dbg(seq, "%s I: n:%llu/%llu t:%u f:0x%02lx d:0x%08x s:%llu "
                       "p:%lu\n", fs_id_buf,
                       (unsigned long long)ip->i_no_formal_ino,
                       (unsigned long long)ip->i_no_addr,
                       IF2DT(ip->i_inode.i_mode), ip->i_flags,
                       (unsigned int)ip->i_diskflags,
                       (unsigned long long)i_size_read(inode), nrpages);
}

/**
 * freeze_go_sync - promote/demote the freeze glock
 * @gl: the glock
 */

static int freeze_go_sync(struct gfs2_glock *gl)
{
        int error = 0;
        struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

        /*
         * We need to check gl_state == LM_ST_SHARED here and not gl_req ==
         * LM_ST_EXCLUSIVE. That's because when any node does a freeze,
         * all the nodes should have the freeze glock in SH mode and they all
         * call do_xmote: One for EX and the others for UN. They ALL must
         * freeze locally, and they ALL must queue freeze work. The freeze_work
         * calls freeze_func, which tries to reacquire the freeze glock in SH,
         * effectively waiting for the thaw on the node who holds it in EX.
         * Once thawed, the work func acquires the freeze glock in
         * SH and everybody goes back to thawed.
         */
        if (gl->gl_state == LM_ST_SHARED && !gfs2_withdrawn(sdp) &&
            !test_bit(SDF_NORECOVERY, &sdp->sd_flags)) {
                atomic_set(&sdp->sd_freeze_state, SFS_STARTING_FREEZE);
                error = freeze_super(sdp->sd_vfs);
                if (error) {
                        fs_info(sdp, "GFS2: couldn't freeze filesystem: %d\n",
                                error);
                        if (gfs2_withdrawn(sdp)) {
                                atomic_set(&sdp->sd_freeze_state, SFS_UNFROZEN);
                                return 0;
                        }
                        gfs2_assert_withdraw(sdp, 0);
                }
                queue_work(gfs2_freeze_wq, &sdp->sd_freeze_work);
                if (test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags))
                        gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_FREEZE |
                                       GFS2_LFC_FREEZE_GO_SYNC);
                else /* read-only mounts */
                        atomic_set(&sdp->sd_freeze_state, SFS_FROZEN);
        }
        return 0;
}

/**
 * freeze_go_xmote_bh - After promoting/demoting the freeze glock
 * @gl: the glock
 */
static int freeze_go_xmote_bh(struct gfs2_glock *gl)
{
        struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
        struct gfs2_inode *ip = GFS2_I(sdp->sd_jdesc->jd_inode);
        struct gfs2_glock *j_gl = ip->i_gl;
        struct gfs2_log_header_host head;
        int error;

        if (test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
                j_gl->gl_ops->go_inval(j_gl, DIO_METADATA);

                error = gfs2_find_jhead(sdp->sd_jdesc, &head, false);
                if (gfs2_assert_withdraw_delayed(sdp, !error))
                        return error;
                if (gfs2_assert_withdraw_delayed(sdp, head.lh_flags &
                                                 GFS2_LOG_HEAD_UNMOUNT))
                        return -EIO;
                sdp->sd_log_sequence = head.lh_sequence + 1;
                gfs2_log_pointers_init(sdp, head.lh_blkno);
        }
        return 0;
}

/**
 * freeze_go_demote_ok - Check to see if it's ok to demote the freeze glock
 * @gl: the glock
 *
 * Always returns 0
 */

static int freeze_go_demote_ok(const struct gfs2_glock *gl)
{
        return 0;
}

/**
 * iopen_go_callback - schedule the dcache entry for the inode to be deleted
 * @gl: the glock
 * @remote: true if this came from a different cluster node
 *
 * gl_lockref.lock is held while calling this
 */
static void iopen_go_callback(struct gfs2_glock *gl, bool remote)
{
        struct gfs2_inode *ip = gl->gl_object;
        struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

        if (!remote || sb_rdonly(sdp->sd_vfs))
                return;

        if (gl->gl_demote_state == LM_ST_UNLOCKED &&
            gl->gl_state == LM_ST_SHARED && ip) {
                gl->gl_lockref.count++;
                if (!queue_delayed_work(gfs2_delete_workqueue,
                                        &gl->gl_delete, 0))
                        gl->gl_lockref.count--;
        }
}

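/*
 * An iopen glock is only safe to demote once no delete work is queued
 * against it: the queued work holds a reference on the glock and needs
 * to run first.
 */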
static int iopen_go_demote_ok(const struct gfs2_glock *gl)
{
        return !gfs2_delete_work_queued(gl);
}

/**
 * inode_go_free - wake up anyone waiting for dlm's unlock ast to free it
 * @gl: glock being freed
 *
 * For now, this is only used for the journal inode glock. In withdraw
 * situations, we need to wait for the glock to be freed so that we know
 * other nodes may proceed with recovery / journal replay.
 */
static void inode_go_free(struct gfs2_glock *gl)
{
        /* Note that we cannot reference gl_object because it's already set
         * to NULL by this point in its lifecycle. */
        if (!test_bit(GLF_FREEING, &gl->gl_flags))
                return;
        clear_bit_unlock(GLF_FREEING, &gl->gl_flags);
        wake_up_bit(&gl->gl_flags, GLF_FREEING);
}

/**
 * nondisk_go_callback - used to signal when a node did a withdraw
 * @gl: the nondisk glock
 * @remote: true if this came from a different cluster node
 */
static void nondisk_go_callback(struct gfs2_glock *gl, bool remote)
{
        struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

        /* Ignore the callback unless it's from another node, and it's the
         * live lock. */
        if (!remote || gl->gl_name.ln_number != GFS2_LIVE_LOCK)
                return;

        /* First order of business is to cancel the demote request. We don't
         * really want to demote a nondisk glock. At best it's just to inform
         * us of another node's withdraw. We'll keep it in SH mode. */
        clear_bit(GLF_DEMOTE, &gl->gl_flags);
        clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags);

        /* Ignore the unlock if we're withdrawn, unmounting, or in recovery. */
        if (test_bit(SDF_NORECOVERY, &sdp->sd_flags) ||
            test_bit(SDF_WITHDRAWN, &sdp->sd_flags) ||
            test_bit(SDF_REMOTE_WITHDRAW, &sdp->sd_flags))
                return;

        /* We only care when a node wants us to unlock, because that means
         * they want a journal recovered. */
        if (gl->gl_demote_state != LM_ST_UNLOCKED)
                return;

        if (sdp->sd_args.ar_spectator) {
                fs_warn(sdp, "Spectator node cannot recover journals.\n");
                return;
        }

        fs_warn(sdp, "Some node has withdrawn; checking for recovery.\n");
        set_bit(SDF_REMOTE_WITHDRAW, &sdp->sd_flags);
        /*
         * We can't call remote_withdraw directly here or gfs2_recover_journal
         * because this is called from the glock unlock function and the
         * remote_withdraw needs to enqueue and dequeue the same "live" glock
         * we were called from. So we queue it to the control work queue in
         * lock_dlm.
         */
        queue_delayed_work(gfs2_control_wq, &sdp->sd_control_work, 0);
}

const struct gfs2_glock_operations gfs2_meta_glops = {
        .go_type = LM_TYPE_META,
        .go_flags = GLOF_NONDISK,
};

const struct gfs2_glock_operations gfs2_inode_glops = {
        .go_sync = inode_go_sync,
        .go_inval = inode_go_inval,
        .go_demote_ok = inode_go_demote_ok,
        .go_instantiate = inode_go_instantiate,
        .go_dump = inode_go_dump,
        .go_type = LM_TYPE_INODE,
        .go_flags = GLOF_ASPACE | GLOF_LRU | GLOF_LVB,
        .go_free = inode_go_free,
};

const struct gfs2_glock_operations gfs2_rgrp_glops = {
        .go_sync = rgrp_go_sync,
        .go_inval = rgrp_go_inval,
        .go_instantiate = gfs2_rgrp_go_instantiate,
        .go_dump = gfs2_rgrp_go_dump,
        .go_type = LM_TYPE_RGRP,
        .go_flags = GLOF_LVB,
};

const struct gfs2_glock_operations gfs2_freeze_glops = {
        .go_sync = freeze_go_sync,
        .go_xmote_bh = freeze_go_xmote_bh,
        .go_demote_ok = freeze_go_demote_ok,
        .go_type = LM_TYPE_NONDISK,
        .go_flags = GLOF_NONDISK,
};

const struct gfs2_glock_operations gfs2_iopen_glops = {
        .go_type = LM_TYPE_IOPEN,
        .go_callback = iopen_go_callback,
        .go_demote_ok = iopen_go_demote_ok,
        .go_flags = GLOF_LRU | GLOF_NONDISK,
        .go_subclass = 1,
};

const struct gfs2_glock_operations gfs2_flock_glops = {
        .go_type = LM_TYPE_FLOCK,
        .go_flags = GLOF_LRU | GLOF_NONDISK,
};

const struct gfs2_glock_operations gfs2_nondisk_glops = {
        .go_type = LM_TYPE_NONDISK,
        .go_flags = GLOF_NONDISK,
        .go_callback = nondisk_go_callback,
};

const struct gfs2_glock_operations gfs2_quota_glops = {
        .go_type = LM_TYPE_QUOTA,
        .go_flags = GLOF_LVB | GLOF_LRU | GLOF_NONDISK,
};

const struct gfs2_glock_operations gfs2_journal_glops = {
        .go_type = LM_TYPE_JOURNAL,
        .go_flags = GLOF_NONDISK,
};

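/*
 * Table mapping each lock type (LM_TYPE_*) to the glock operations used
 * for glocks of that type.
 */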
const struct gfs2_glock_operations *gfs2_glops_list[] = {
        [LM_TYPE_META] = &gfs2_meta_glops,
        [LM_TYPE_INODE] = &gfs2_inode_glops,
        [LM_TYPE_RGRP] = &gfs2_rgrp_glops,
        [LM_TYPE_IOPEN] = &gfs2_iopen_glops,
        [LM_TYPE_FLOCK] = &gfs2_flock_glops,
        [LM_TYPE_NONDISK] = &gfs2_nondisk_glops,
        [LM_TYPE_QUOTA] = &gfs2_quota_glops,
        [LM_TYPE_JOURNAL] = &gfs2_journal_glops,
};