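// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc.  All rights reserved.
 */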
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/gfs2_ondisk.h>
#include <linux/bio.h>
#include <linux/posix_acl.h>
#include <linux/security.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "recovery.h"
#include "rgrp.h"
#include "util.h"
#include "trans.h"
#include "dir.h"
#include "lops.h"

struct workqueue_struct *gfs2_freeze_wq;

extern struct workqueue_struct *gfs2_control_wq;

static void gfs2_ail_error(struct gfs2_glock *gl, const struct buffer_head *bh)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

	fs_err(sdp,
	       "AIL buffer %p: blocknr %llu state 0x%08lx mapping %p page "
	       "state 0x%lx\n",
	       bh, (unsigned long long)bh->b_blocknr, bh->b_state,
	       bh->b_page->mapping, bh->b_page->flags);
	fs_err(sdp, "AIL glock %u:%llu mapping %p\n",
	       gl->gl_name.ln_type, gl->gl_name.ln_number,
	       gfs2_glock2aspace(gl));
	gfs2_lm(sdp, "AIL error\n");
	gfs2_withdraw_delayed(sdp);
}
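
/**
 * __gfs2_ail_flush - remove all buffers for a given lock from the AIL
 * @gl: the glock
 * @fsync: set when called from fsync (not all buffers will be clean)
 * @nr_revokes: Number of buffers to revoke
 *
 * None of the buffers should be dirty, locked, or pinned.
 */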
static void __gfs2_ail_flush(struct gfs2_glock *gl, bool fsync,
			     unsigned int nr_revokes)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct list_head *head = &gl->gl_ail_list;
	struct gfs2_bufdata *bd, *tmp;
	struct buffer_head *bh;
	const unsigned long b_state = (1UL << BH_Dirty)|(1UL << BH_Pinned)|(1UL << BH_Lock);

	gfs2_log_lock(sdp);
	spin_lock(&sdp->sd_ail_lock);
	list_for_each_entry_safe_reverse(bd, tmp, head, bd_ail_gl_list) {
		if (nr_revokes == 0)
			break;
		bh = bd->bd_bh;
		if (bh->b_state & b_state) {
			if (fsync)
				continue;
			gfs2_ail_error(gl, bh);
		}
		gfs2_trans_add_revoke(sdp, bd);
		nr_revokes--;
	}
	GLOCK_BUG_ON(gl, !fsync && atomic_read(&gl->gl_ail_count));
	spin_unlock(&sdp->sd_ail_lock);
	gfs2_log_unlock(sdp);
}

static int gfs2_ail_empty_gl(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct gfs2_trans tr;
	unsigned int revokes;
	int ret;

	revokes = atomic_read(&gl->gl_ail_count);

	if (!revokes) {
		bool have_revokes;
		bool log_in_flight;
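
		/*
		 * We have nothing on the ail, but there could be revokes on
		 * the sdp revoke queue, in which case, we still want to flush
		 * the log and wait for it to finish.
		 *
		 * If the sdp revoke list is empty too, we might still have an
		 * io outstanding for writing revokes, so we should wait for
		 * it before returning.
		 *
		 * If none of these conditions are true, our revokes are all
		 * flushed and we can return.
		 */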
		gfs2_log_lock(sdp);
		have_revokes = !list_empty(&sdp->sd_log_revokes);
		log_in_flight = atomic_read(&sdp->sd_log_in_flight);
		gfs2_log_unlock(sdp);
		if (have_revokes)
			goto flush;
		if (log_in_flight)
			log_flush_wait(sdp);
		return 0;
	}

	memset(&tr, 0, sizeof(tr));
	set_bit(TR_ONSTACK, &tr.tr_flags);
	ret = __gfs2_trans_begin(&tr, sdp, 0, revokes, _RET_IP_);
	if (ret)
		goto flush;
	__gfs2_ail_flush(gl, false, revokes);
	gfs2_trans_end(sdp);

flush:
	gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
		       GFS2_LFC_AIL_EMPTY_GL);
	return 0;
}

void gfs2_ail_flush(struct gfs2_glock *gl, bool fsync)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	unsigned int revokes = atomic_read(&gl->gl_ail_count);
	int ret;

	if (!revokes)
		return;

	ret = gfs2_trans_begin(sdp, 0, revokes);
	if (ret)
		return;
	__gfs2_ail_flush(gl, fsync, revokes);
	gfs2_trans_end(sdp);
	gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
		       GFS2_LFC_AIL_FLUSH);
}
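
/**
 * gfs2_rgrp_metasync - sync out the metadata of a resource group
 * @gl: the glock protecting the resource group
 *
 */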
static int gfs2_rgrp_metasync(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct address_space *metamapping = &sdp->sd_aspace;
	struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl);
	const unsigned bsize = sdp->sd_sb.sb_bsize;
	loff_t start = (rgd->rd_addr * bsize) & PAGE_MASK;
	loff_t end = PAGE_ALIGN((rgd->rd_addr + rgd->rd_length) * bsize) - 1;
	int error;

	filemap_fdatawrite_range(metamapping, start, end);
	error = filemap_fdatawait_range(metamapping, start, end);
	WARN_ON_ONCE(error && !gfs2_withdrawn(sdp));
	mapping_set_error(metamapping, error);
	if (error)
		gfs2_io_error(sdp);
	return error;
}
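
/**
 * rgrp_go_sync - sync out the metadata for this glock
 * @gl: the glock
 *
 * Called when demoting or unlocking an EX glock.  We must flush
 * to disk all dirty buffers/pages relating to this glock, and must not
 * return to caller to demote/unlock the glock until I/O is complete.
 */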
static int rgrp_go_sync(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl);
	int error;

	if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags))
		return 0;
	GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_EXCLUSIVE);

	gfs2_log_flush(sdp, gl, GFS2_LOG_HEAD_FLUSH_NORMAL |
		       GFS2_LFC_RGRP_GO_SYNC);
	error = gfs2_rgrp_metasync(gl);
	if (!error)
		error = gfs2_ail_empty_gl(gl);
	gfs2_free_clones(rgd);
	return error;
}
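
/**
 * rgrp_go_inval - invalidate the metadata for this glock
 * @gl: the glock
 * @flags:
 *
 * We never used LM_ST_DEFERRED with resource groups, so that we
 * should always see the metadata flag set here.
 *
 */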
static void rgrp_go_inval(struct gfs2_glock *gl, int flags)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct address_space *mapping = &sdp->sd_aspace;
	struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl);
	const unsigned bsize = sdp->sd_sb.sb_bsize;
	loff_t start = (rgd->rd_addr * bsize) & PAGE_MASK;
	loff_t end = PAGE_ALIGN((rgd->rd_addr + rgd->rd_length) * bsize) - 1;

	gfs2_rgrp_brelse(rgd);
	WARN_ON_ONCE(!(flags & DIO_METADATA));
	truncate_inode_pages_range(mapping, start, end);
}

static void gfs2_rgrp_go_dump(struct seq_file *seq, struct gfs2_glock *gl,
			      const char *fs_id_buf)
{
	struct gfs2_rgrpd *rgd = gl->gl_object;

	if (rgd)
		gfs2_rgrp_dump(seq, rgd, fs_id_buf);
}

static struct gfs2_inode *gfs2_glock2inode(struct gfs2_glock *gl)
{
	struct gfs2_inode *ip;

	spin_lock(&gl->gl_lockref.lock);
	ip = gl->gl_object;
	if (ip)
		set_bit(GIF_GLOP_PENDING, &ip->i_flags);
	spin_unlock(&gl->gl_lockref.lock);
	return ip;
}

struct gfs2_rgrpd *gfs2_glock2rgrp(struct gfs2_glock *gl)
{
	struct gfs2_rgrpd *rgd;

	spin_lock(&gl->gl_lockref.lock);
	rgd = gl->gl_object;
	spin_unlock(&gl->gl_lockref.lock);

	return rgd;
}

static void gfs2_clear_glop_pending(struct gfs2_inode *ip)
{
	if (!ip)
		return;

	clear_bit_unlock(GIF_GLOP_PENDING, &ip->i_flags);
	wake_up_bit(&ip->i_flags, GIF_GLOP_PENDING);
}
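
/**
 * gfs2_inode_metasync - sync out the metadata of an inode
 * @gl: the glock protecting the inode
 *
 */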
int gfs2_inode_metasync(struct gfs2_glock *gl)
{
	struct address_space *metamapping = gfs2_glock2aspace(gl);
	int error;

	filemap_fdatawrite(metamapping);
	error = filemap_fdatawait(metamapping);
	if (error)
		gfs2_io_error(gl->gl_name.ln_sbd);
	return error;
}
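
/**
 * inode_go_sync - Sync the dirty metadata of an inode
 * @gl: the glock protecting the inode
 *
 */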
static int inode_go_sync(struct gfs2_glock *gl)
{
	struct gfs2_inode *ip = gfs2_glock2inode(gl);
	int isreg = ip && S_ISREG(ip->i_inode.i_mode);
	struct address_space *metamapping = gfs2_glock2aspace(gl);
	int error = 0, ret;

	if (isreg) {
		if (test_and_clear_bit(GIF_SW_PAGED, &ip->i_flags))
			unmap_shared_mapping_range(ip->i_inode.i_mapping, 0, 0);
		inode_dio_wait(&ip->i_inode);
	}
	if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags))
		goto out;

	GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_EXCLUSIVE);

	gfs2_log_flush(gl->gl_name.ln_sbd, gl, GFS2_LOG_HEAD_FLUSH_NORMAL |
		       GFS2_LFC_INODE_GO_SYNC);
	filemap_fdatawrite(metamapping);
	if (isreg) {
		struct address_space *mapping = ip->i_inode.i_mapping;
		filemap_fdatawrite(mapping);
		error = filemap_fdatawait(mapping);
		mapping_set_error(mapping, error);
	}
	ret = gfs2_inode_metasync(gl);
	if (!error)
		error = ret;
	gfs2_ail_empty_gl(gl);
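
	/*
	 * Writeback of the data mapping may cause the dirty flag to be set
	 * so we have to clear it again here.
	 */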
	smp_mb__before_atomic();
	clear_bit(GLF_DIRTY, &gl->gl_flags);

out:
	gfs2_clear_glop_pending(ip);
	return error;
}
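
/**
 * inode_go_inval - prepare a inode glock to be released
 * @gl: the glock
 * @flags:
 *
 * Normally we invalidate everything, but if we are moving into
 * LM_ST_DEFERRED from LM_ST_SHARED or LM_ST_EXCLUSIVE then we
 * can keep hold of the metadata, since it won't have changed.
 *
 */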
static void inode_go_inval(struct gfs2_glock *gl, int flags)
{
	struct gfs2_inode *ip = gfs2_glock2inode(gl);

	if (flags & DIO_METADATA) {
		struct address_space *mapping = gfs2_glock2aspace(gl);
		truncate_inode_pages(mapping, 0);
		if (ip) {
			set_bit(GLF_INSTANTIATE_NEEDED, &gl->gl_flags);
			forget_all_cached_acls(&ip->i_inode);
			security_inode_invalidate_secctx(&ip->i_inode);
			gfs2_dir_hash_inval(ip);
		}
	}

	if (ip == GFS2_I(gl->gl_name.ln_sbd->sd_rindex)) {
		gfs2_log_flush(gl->gl_name.ln_sbd, NULL,
			       GFS2_LOG_HEAD_FLUSH_NORMAL |
			       GFS2_LFC_INODE_GO_INVAL);
		gl->gl_name.ln_sbd->sd_rindex_uptodate = 0;
	}
	if (ip && S_ISREG(ip->i_inode.i_mode))
		truncate_inode_pages(ip->i_inode.i_mapping, 0);

	gfs2_clear_glop_pending(ip);
}
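
/**
 * inode_go_demote_ok - Check to see if it's ok to unlock an inode glock
 * @gl: the glock
 *
 * Returns: 1 if it's ok
 */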
static int inode_go_demote_ok(const struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

	if (sdp->sd_jindex == gl->gl_object || sdp->sd_rindex == gl->gl_object)
		return 0;

	return 1;
}

static int gfs2_dinode_in(struct gfs2_inode *ip, const void *buf)
{
	const struct gfs2_dinode *str = buf;
	struct timespec64 atime;
	u16 height, depth;
	umode_t mode = be32_to_cpu(str->di_mode);
	bool is_new = ip->i_inode.i_state & I_NEW;

	if (unlikely(ip->i_no_addr != be64_to_cpu(str->di_num.no_addr)))
		goto corrupt;
	if (unlikely(!is_new && inode_wrong_type(&ip->i_inode, mode)))
		goto corrupt;
	ip->i_no_formal_ino = be64_to_cpu(str->di_num.no_formal_ino);
	ip->i_inode.i_mode = mode;
	if (is_new) {
		ip->i_inode.i_rdev = 0;
		switch (mode & S_IFMT) {
		case S_IFBLK:
		case S_IFCHR:
			ip->i_inode.i_rdev = MKDEV(be32_to_cpu(str->di_major),
						   be32_to_cpu(str->di_minor));
			break;
		}
	}

	i_uid_write(&ip->i_inode, be32_to_cpu(str->di_uid));
	i_gid_write(&ip->i_inode, be32_to_cpu(str->di_gid));
	set_nlink(&ip->i_inode, be32_to_cpu(str->di_nlink));
	i_size_write(&ip->i_inode, be64_to_cpu(str->di_size));
	gfs2_set_inode_blocks(&ip->i_inode, be64_to_cpu(str->di_blocks));
	atime.tv_sec = be64_to_cpu(str->di_atime);
	atime.tv_nsec = be32_to_cpu(str->di_atime_nsec);
	if (timespec64_compare(&ip->i_inode.i_atime, &atime) < 0)
		ip->i_inode.i_atime = atime;
	ip->i_inode.i_mtime.tv_sec = be64_to_cpu(str->di_mtime);
	ip->i_inode.i_mtime.tv_nsec = be32_to_cpu(str->di_mtime_nsec);
	ip->i_inode.i_ctime.tv_sec = be64_to_cpu(str->di_ctime);
	ip->i_inode.i_ctime.tv_nsec = be32_to_cpu(str->di_ctime_nsec);

	ip->i_goal = be64_to_cpu(str->di_goal_meta);
	ip->i_generation = be64_to_cpu(str->di_generation);

	ip->i_diskflags = be32_to_cpu(str->di_flags);
	ip->i_eattr = be64_to_cpu(str->di_eattr);
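	/* i_diskflags and i_eattr must be set before gfs2_set_inode_flags() */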
	gfs2_set_inode_flags(&ip->i_inode);
	height = be16_to_cpu(str->di_height);
	if (unlikely(height > GFS2_MAX_META_HEIGHT))
		goto corrupt;
	ip->i_height = (u8)height;

	depth = be16_to_cpu(str->di_depth);
	if (unlikely(depth > GFS2_DIR_MAX_DEPTH))
		goto corrupt;
	ip->i_depth = (u8)depth;
	ip->i_entries = be32_to_cpu(str->di_entries);

	if (S_ISREG(ip->i_inode.i_mode))
		gfs2_set_aops(&ip->i_inode);

	return 0;
corrupt:
	gfs2_consist_inode(ip);
	return -EIO;
}
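
/**
 * gfs2_inode_refresh - Refresh the incore copy of the dinode
 * @ip: The GFS2 inode
 *
 * Returns: errno
 */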
int gfs2_inode_refresh(struct gfs2_inode *ip)
{
	struct buffer_head *dibh;
	int error;

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		return error;

	error = gfs2_dinode_in(ip, dibh->b_data);
	brelse(dibh);
	return error;
}
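
/**
 * inode_go_instantiate - read in an inode if necessary
 * @gl: The glock
 *
 * Returns: errno
 */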
static int inode_go_instantiate(struct gfs2_glock *gl)
{
	struct gfs2_inode *ip = gl->gl_object;

	if (!ip)
		return 0;

	return gfs2_inode_refresh(ip);
}

static int inode_go_held(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_inode *ip = gl->gl_object;
	int error = 0;

	if (!ip)
		return 0;

	if (gh->gh_state != LM_ST_DEFERRED)
		inode_dio_wait(&ip->i_inode);

	if ((ip->i_diskflags & GFS2_DIF_TRUNC_IN_PROG) &&
	    (gl->gl_state == LM_ST_EXCLUSIVE) &&
	    (gh->gh_state == LM_ST_EXCLUSIVE))
		error = gfs2_truncatei_resume(ip);

	return error;
}
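
/**
 * inode_go_dump - print information about an inode
 * @seq: The iterator
 * @gl: The glock
 * @fs_id_buf: file system id (may be empty)
 *
 */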
static void inode_go_dump(struct seq_file *seq, struct gfs2_glock *gl,
			  const char *fs_id_buf)
{
	struct gfs2_inode *ip = gl->gl_object;
	struct inode *inode = &ip->i_inode;
	unsigned long nrpages;

	if (ip == NULL)
		return;

	xa_lock_irq(&inode->i_data.i_pages);
	nrpages = inode->i_data.nrpages;
	xa_unlock_irq(&inode->i_data.i_pages);

	gfs2_print_dbg(seq, "%s I: n:%llu/%llu t:%u f:0x%02lx d:0x%08x s:%llu "
		       "p:%lu\n", fs_id_buf,
		       (unsigned long long)ip->i_no_formal_ino,
		       (unsigned long long)ip->i_no_addr,
		       IF2DT(ip->i_inode.i_mode), ip->i_flags,
		       (unsigned int)ip->i_diskflags,
		       (unsigned long long)i_size_read(inode), nrpages);
}
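
/**
 * freeze_go_sync - promote/demote the freeze glock
 * @gl: the glock
 */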
static int freeze_go_sync(struct gfs2_glock *gl)
{
	int error = 0;
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
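
	/*
	 * We need to check gl_state == LM_ST_SHARED here and not gl_req ==
	 * LM_ST_EXCLUSIVE: a freeze request from another node arrives here
	 * as a demote of this node's shared hold on the freeze glock. In
	 * that case we freeze the local super block, queue the freeze work,
	 * and flush the log (or mark the filesystem frozen if the journal
	 * is no longer live) before the glock can be released.
	 */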
	if (gl->gl_state == LM_ST_SHARED && !gfs2_withdrawn(sdp) &&
	    !test_bit(SDF_NORECOVERY, &sdp->sd_flags)) {
		atomic_set(&sdp->sd_freeze_state, SFS_STARTING_FREEZE);
		error = freeze_super(sdp->sd_vfs);
		if (error) {
			fs_info(sdp, "GFS2: couldn't freeze filesystem: %d\n",
				error);
			if (gfs2_withdrawn(sdp)) {
				atomic_set(&sdp->sd_freeze_state, SFS_UNFROZEN);
				return 0;
			}
			gfs2_assert_withdraw(sdp, 0);
		}
		queue_work(gfs2_freeze_wq, &sdp->sd_freeze_work);
		if (test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags))
			gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_FREEZE |
				       GFS2_LFC_FREEZE_GO_SYNC);
		else
			atomic_set(&sdp->sd_freeze_state, SFS_FROZEN);
	}
	return 0;
}
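
/**
 * freeze_go_xmote_bh - After promoting/demoting the freeze glock
 * @gl: the glock
 */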
static int freeze_go_xmote_bh(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_jdesc->jd_inode);
	struct gfs2_glock *j_gl = ip->i_gl;
	struct gfs2_log_header_host head;
	int error;

	if (test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
		j_gl->gl_ops->go_inval(j_gl, DIO_METADATA);

		error = gfs2_find_jhead(sdp->sd_jdesc, &head, false);
		if (gfs2_assert_withdraw_delayed(sdp, !error))
			return error;
		if (gfs2_assert_withdraw_delayed(sdp, head.lh_flags &
						 GFS2_LOG_HEAD_UNMOUNT))
			return -EIO;
		sdp->sd_log_sequence = head.lh_sequence + 1;
		gfs2_log_pointers_init(sdp, head.lh_blkno);
	}
	return 0;
}
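
/**
 * freeze_go_demote_ok
 * @gl: the glock
 *
 * Always returns 0
 */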
static int freeze_go_demote_ok(const struct gfs2_glock *gl)
{
	return 0;
}
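
/**
 * iopen_go_callback - schedule the dcache entry for the inode to be deleted
 * @gl: the glock
 * @remote: true if this came from a different cluster node
 *
 * gl_lockref.lock lock is held while calling this
 */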
static void iopen_go_callback(struct gfs2_glock *gl, bool remote)
{
	struct gfs2_inode *ip = gl->gl_object;
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

	if (!remote || sb_rdonly(sdp->sd_vfs))
		return;

	if (gl->gl_demote_state == LM_ST_UNLOCKED &&
	    gl->gl_state == LM_ST_SHARED && ip) {
		gl->gl_lockref.count++;
		if (!queue_delayed_work(gfs2_delete_workqueue,
					&gl->gl_delete, 0))
			gl->gl_lockref.count--;
	}
}

static int iopen_go_demote_ok(const struct gfs2_glock *gl)
{
	return !gfs2_delete_work_queued(gl);
}
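
/**
 * inode_go_free - wake up anyone waiting for dlm's unlock ast to free it
 * @gl: glock being freed
 *
 * We need to allow any pending work to finish before this glock is freed.
 */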
static void inode_go_free(struct gfs2_glock *gl)
{
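	/* If nobody set GLF_FREEING, nobody is waiting for this glock to be
	   freed, so there is nothing to do. */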
	if (!test_bit(GLF_FREEING, &gl->gl_flags))
		return;
	clear_bit_unlock(GLF_FREEING, &gl->gl_flags);
	wake_up_bit(&gl->gl_flags, GLF_FREEING);
}
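
/**
 * nondisk_go_callback - used to signal when a node did a withdraw
 * @gl: the nondisk glock
 * @remote: true if this came from a different cluster node
 *
 */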
static void nondisk_go_callback(struct gfs2_glock *gl, bool remote)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
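
	/* Ignore the callback unless it's from another node, and it's the
	   live lock. */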
	if (!remote || gl->gl_name.ln_number != GFS2_LIVE_LOCK)
		return;
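
	/* First order of business is to cancel the demote request. We don't
	 * really want to demote a nondisk glock. At best it's just to inform
	 * us of another node's withdraw. We'll keep it in SH mode. */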
	clear_bit(GLF_DEMOTE, &gl->gl_flags);
	clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags);
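
	/* Ignore the unlock if we're withdrawn, unmounting, or in recovery. */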
	if (test_bit(SDF_NORECOVERY, &sdp->sd_flags) ||
	    test_bit(SDF_WITHDRAWN, &sdp->sd_flags) ||
	    test_bit(SDF_REMOTE_WITHDRAW, &sdp->sd_flags))
		return;
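
	/* We only care when a node wants us to unlock, because that means
	 * they want a journal recovered. */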
	if (gl->gl_demote_state != LM_ST_UNLOCKED)
		return;

	if (sdp->sd_args.ar_spectator) {
		fs_warn(sdp, "Spectator node cannot recover journals.\n");
		return;
	}

	fs_warn(sdp, "Some node has withdrawn; checking for recovery.\n");
	set_bit(SDF_REMOTE_WITHDRAW, &sdp->sd_flags);
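
	/*
	 * We can't recover the journal directly from here: queue the work
	 * to gfs2_control_wq, which will check for withdrawn journals and
	 * recover them as needed.
	 */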
	queue_delayed_work(gfs2_control_wq, &sdp->sd_control_work, 0);
}

const struct gfs2_glock_operations gfs2_meta_glops = {
	.go_type = LM_TYPE_META,
	.go_flags = GLOF_NONDISK,
};

const struct gfs2_glock_operations gfs2_inode_glops = {
	.go_sync = inode_go_sync,
	.go_inval = inode_go_inval,
	.go_demote_ok = inode_go_demote_ok,
	.go_instantiate = inode_go_instantiate,
	.go_held = inode_go_held,
	.go_dump = inode_go_dump,
	.go_type = LM_TYPE_INODE,
	.go_flags = GLOF_ASPACE | GLOF_LRU | GLOF_LVB,
	.go_free = inode_go_free,
};

const struct gfs2_glock_operations gfs2_rgrp_glops = {
	.go_sync = rgrp_go_sync,
	.go_inval = rgrp_go_inval,
	.go_instantiate = gfs2_rgrp_go_instantiate,
	.go_dump = gfs2_rgrp_go_dump,
	.go_type = LM_TYPE_RGRP,
	.go_flags = GLOF_LVB,
};

const struct gfs2_glock_operations gfs2_freeze_glops = {
	.go_sync = freeze_go_sync,
	.go_xmote_bh = freeze_go_xmote_bh,
	.go_demote_ok = freeze_go_demote_ok,
	.go_type = LM_TYPE_NONDISK,
	.go_flags = GLOF_NONDISK,
};

const struct gfs2_glock_operations gfs2_iopen_glops = {
	.go_type = LM_TYPE_IOPEN,
	.go_callback = iopen_go_callback,
	.go_dump = inode_go_dump,
	.go_demote_ok = iopen_go_demote_ok,
	.go_flags = GLOF_LRU | GLOF_NONDISK,
	.go_subclass = 1,
};

const struct gfs2_glock_operations gfs2_flock_glops = {
	.go_type = LM_TYPE_FLOCK,
	.go_flags = GLOF_LRU | GLOF_NONDISK,
};

const struct gfs2_glock_operations gfs2_nondisk_glops = {
	.go_type = LM_TYPE_NONDISK,
	.go_flags = GLOF_NONDISK,
	.go_callback = nondisk_go_callback,
};

const struct gfs2_glock_operations gfs2_quota_glops = {
	.go_type = LM_TYPE_QUOTA,
	.go_flags = GLOF_LVB | GLOF_LRU | GLOF_NONDISK,
};

const struct gfs2_glock_operations gfs2_journal_glops = {
	.go_type = LM_TYPE_JOURNAL,
	.go_flags = GLOF_NONDISK,
};

const struct gfs2_glock_operations *gfs2_glops_list[] = {
	[LM_TYPE_META] = &gfs2_meta_glops,
	[LM_TYPE_INODE] = &gfs2_inode_glops,
	[LM_TYPE_RGRP] = &gfs2_rgrp_glops,
	[LM_TYPE_IOPEN] = &gfs2_iopen_glops,
	[LM_TYPE_FLOCK] = &gfs2_flock_glops,
	[LM_TYPE_NONDISK] = &gfs2_nondisk_glops,
	[LM_TYPE_QUOTA] = &gfs2_quota_glops,
	[LM_TYPE_JOURNAL] = &gfs2_journal_glops,
};