0001
0002
0003
0004
0005
0006
0007 #ifndef __GLOCK_DOT_H__
0008 #define __GLOCK_DOT_H__
0009
0010 #include <linux/sched.h>
0011 #include <linux/parser.h>
0012 #include "incore.h"
0013 #include "util.h"
0014
0015
0016
/*
 * Mount-option tokens for lock-module mount arguments; matched with
 * match_token() (linux/parser.h) against the lm_tokens table exported
 * by the lock protocol in struct lm_lockops.
 */
enum {
	Opt_jid,	/* journal id to use */
	Opt_id,		/* NOTE(review): presumably a node/lockspace id — confirm against lock module */
	Opt_first,	/* this node is the first mounter */
	Opt_nodir,	/* no resource directory — TODO confirm against dlm options */
	Opt_err,	/* unrecognized option */
};
0024
0025
0026
0027
0028
/*
 * Glock (lock name) types, identifying what kind of object a glock
 * protects. NOTE(review): these values look protocol-visible (shared
 * with the lock module), so they presumably must not be renumbered —
 * confirm before changing.
 */
#define LM_TYPE_RESERVED	0x00
#define LM_TYPE_NONDISK		0x01
#define LM_TYPE_INODE		0x02
#define LM_TYPE_RGRP		0x03	/* resource group */
#define LM_TYPE_META		0x04
#define LM_TYPE_IOPEN		0x05	/* inode open tracking */
#define LM_TYPE_FLOCK		0x06
#define LM_TYPE_PLOCK		0x07	/* posix lock */
#define LM_TYPE_QUOTA		0x08
#define LM_TYPE_JOURNAL		0x09
0039
0040
0041
0042
0043
0044
0045
0046
/*
 * Lock states: the state a glock is held in (gl->gl_state) or is being
 * requested in (see lm_lock()'s req_state). LM_ST_DEFERRED is a shared
 * state distinct from LM_ST_SHARED — presumably incompatible with it;
 * confirm against the lock-module conversion table.
 */
#define LM_ST_UNLOCKED		0
#define LM_ST_EXCLUSIVE		1
#define LM_ST_DEFERRED		2
#define LM_ST_SHARED		3
0051
0052
0053
0054
0055
0056
0057
0058
0059
0060
0061
0062
0063
0064
0065
0066
0067
0068
0069
0070
0071
0072
0073
0074
0075
0076
0077
0078
0079
0080
0081
0082
0083
0084
/*
 * Lock request flags. The LM_FLAG_* values are passed through to the
 * lock module (see lm_lock() in struct lm_lockops); the GL_* values are
 * glock-layer-only modifiers. NOTE(review): semantics below are the
 * conventional gfs2 meanings — verify against glock.c before relying on
 * any of them:
 *
 * LM_FLAG_TRY      don't block if the lock can't be granted immediately
 * LM_FLAG_TRY_1CB  as TRY, but allow one callback to current holders
 * LM_FLAG_NOEXP    ignore "expired" (recovery) restrictions
 * LM_FLAG_ANY      either SHARED or DEFERRED is acceptable
 * LM_FLAG_PRIORITY grant ahead of other queued requests
 * LM_FLAG_NODE_SCOPE EX holders on this node share the lock
 * GL_ASYNC         don't wait for grant; poll/wait explicitly
 * GL_EXACT         grant only the exact requested state
 * GL_SKIP          skip instantiation of the object protected
 * GL_NOCACHE       demote the glock as soon as it is released
 */
#define LM_FLAG_TRY		0x0001
#define LM_FLAG_TRY_1CB		0x0002
#define LM_FLAG_NOEXP		0x0004
#define LM_FLAG_ANY		0x0008
#define LM_FLAG_PRIORITY	0x0010
#define LM_FLAG_NODE_SCOPE	0x0020
#define GL_ASYNC		0x0040
#define GL_EXACT		0x0080
#define GL_SKIP			0x0100
#define GL_NOCACHE		0x0400
0095
0096
0097
0098
0099
0100
0101
0102
0103
0104
0105
0106
/*
 * Lock-module result codes: the low two bits carry the granted LM_ST_*
 * state (extract with LM_OUT_ST_MASK); LM_OUT_CANCELED / LM_OUT_ERROR
 * are status flags set above the state bits.
 */
#define LM_OUT_ST_MASK		0x00000003
#define LM_OUT_CANCELED		0x00000008
#define LM_OUT_ERROR		0x00000004
0110
0111
0112
0113
0114
/*
 * Journal-recovery results reported via lm_recovery_result(), and the
 * errno-like status returned when an LM_FLAG_TRY request fails.
 */
#define LM_RD_GAVEUP		308
#define LM_RD_SUCCESS		309

#define GLR_TRYFAILED		13
0119
/*
 * Minimum-hold time tuning, in jiffies (HZ-based): the default/maximum
 * hold, a floor, and the per-use increment/decrement presumably used to
 * adapt how long a glock is retained before demotion — confirm usage in
 * glock.c.
 */
#define GL_GLOCK_MAX_HOLD        (long)(HZ / 5)
#define GL_GLOCK_DFT_HOLD        (long)(HZ / 5)
#define GL_GLOCK_MIN_HOLD        (long)(10)
#define GL_GLOCK_HOLD_INCR       (long)(HZ / 20)
#define GL_GLOCK_HOLD_DECR       (long)(HZ / 40)
0125
/*
 * struct lm_lockops - operations vector implemented by a lock protocol
 * (e.g. gfs2_dlm_ops below). The glock layer calls through this table
 * for all inter-node locking.
 */
struct lm_lockops {
	const char *lm_proto_name;	/* protocol name for matching at mount time */
	/* mount/attach this filesystem to the named lock table */
	int (*lm_mount) (struct gfs2_sbd *sdp, const char *table);
	/* notification that first-mounter setup has completed */
	void (*lm_first_done) (struct gfs2_sbd *sdp);
	/* report the result of recovering journal 'jid' (LM_RD_*) */
	void (*lm_recovery_result) (struct gfs2_sbd *sdp, unsigned int jid,
				    unsigned int result);
	void (*lm_unmount) (struct gfs2_sbd *sdp);
	void (*lm_withdraw) (struct gfs2_sbd *sdp);
	/* drop the protocol-side lock backing this glock */
	void (*lm_put_lock) (struct gfs2_glock *gl);
	/* request conversion of gl to req_state (LM_ST_*), with LM_FLAG_* flags */
	int (*lm_lock) (struct gfs2_glock *gl, unsigned int req_state,
			unsigned int flags);
	/* cancel an in-flight lock request for gl */
	void (*lm_cancel) (struct gfs2_glock *gl);
	const match_table_t *lm_tokens;	/* mount-option tokens (Opt_* above) */
};
0140
/*
 * struct gfs2_glock_aspace - a glock allocated together with its
 * backing address_space. gfs2_glock2aspace() depends on this layout:
 * it container_of()'s from the embedded glock to reach the mapping.
 */
struct gfs2_glock_aspace {
	struct gfs2_glock glock;
	struct address_space mapping;
};
0145
0146 extern struct workqueue_struct *gfs2_delete_workqueue;
0147 static inline struct gfs2_holder *gfs2_glock_is_locked_by_me(struct gfs2_glock *gl)
0148 {
0149 struct gfs2_holder *gh;
0150 struct pid *pid;
0151
0152
0153 spin_lock(&gl->gl_lockref.lock);
0154 pid = task_pid(current);
0155 list_for_each_entry(gh, &gl->gl_holders, gh_list) {
0156 if (!test_bit(HIF_HOLDER, &gh->gh_iflags))
0157 break;
0158 if (test_bit(HIF_MAY_DEMOTE, &gh->gh_iflags))
0159 continue;
0160 if (gh->gh_owner_pid == pid)
0161 goto out;
0162 }
0163 gh = NULL;
0164 out:
0165 spin_unlock(&gl->gl_lockref.lock);
0166
0167 return gh;
0168 }
0169
/* Returns nonzero if the glock is currently in the exclusive state. */
static inline int gfs2_glock_is_held_excl(struct gfs2_glock *gl)
{
	return gl->gl_state == LM_ST_EXCLUSIVE;
}
0174
/* Returns nonzero if the glock is currently in the deferred state. */
static inline int gfs2_glock_is_held_dfrd(struct gfs2_glock *gl)
{
	return gl->gl_state == LM_ST_DEFERRED;
}
0179
/* Returns nonzero if the glock is currently in the shared state. */
static inline int gfs2_glock_is_held_shrd(struct gfs2_glock *gl)
{
	return gl->gl_state == LM_ST_SHARED;
}
0184
0185 static inline struct address_space *gfs2_glock2aspace(struct gfs2_glock *gl)
0186 {
0187 if (gl->gl_ops->go_flags & GLOF_ASPACE) {
0188 struct gfs2_glock_aspace *gla =
0189 container_of(gl, struct gfs2_glock_aspace, glock);
0190 return &gla->mapping;
0191 }
0192 return NULL;
0193 }
0194
0195 extern int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
0196 const struct gfs2_glock_operations *glops,
0197 int create, struct gfs2_glock **glp);
0198 extern void gfs2_glock_hold(struct gfs2_glock *gl);
0199 extern void gfs2_glock_put(struct gfs2_glock *gl);
0200 extern void gfs2_glock_queue_put(struct gfs2_glock *gl);
0201
0202 extern void __gfs2_holder_init(struct gfs2_glock *gl, unsigned int state,
0203 u16 flags, struct gfs2_holder *gh,
0204 unsigned long ip);
0205 static inline void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state,
0206 u16 flags, struct gfs2_holder *gh) {
0207 __gfs2_holder_init(gl, state, flags, gh, _RET_IP_);
0208 }
0209
0210 extern void gfs2_holder_reinit(unsigned int state, u16 flags,
0211 struct gfs2_holder *gh);
0212 extern void gfs2_holder_uninit(struct gfs2_holder *gh);
0213 extern int gfs2_glock_nq(struct gfs2_holder *gh);
0214 extern int gfs2_glock_poll(struct gfs2_holder *gh);
0215 extern int gfs2_instantiate(struct gfs2_holder *gh);
0216 extern int gfs2_glock_holder_ready(struct gfs2_holder *gh);
0217 extern int gfs2_glock_wait(struct gfs2_holder *gh);
0218 extern int gfs2_glock_async_wait(unsigned int num_gh, struct gfs2_holder *ghs);
0219 extern void gfs2_glock_dq(struct gfs2_holder *gh);
0220 extern void gfs2_glock_dq_wait(struct gfs2_holder *gh);
0221 extern void gfs2_glock_dq_uninit(struct gfs2_holder *gh);
0222 extern int gfs2_glock_nq_num(struct gfs2_sbd *sdp, u64 number,
0223 const struct gfs2_glock_operations *glops,
0224 unsigned int state, u16 flags,
0225 struct gfs2_holder *gh);
0226 extern int gfs2_glock_nq_m(unsigned int num_gh, struct gfs2_holder *ghs);
0227 extern void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs);
0228 extern void gfs2_dump_glock(struct seq_file *seq, struct gfs2_glock *gl,
0229 bool fsid);
/* Dump the glock and BUG() when the condition x IS true. */
#define GLOCK_BUG_ON(gl,x) do { if (unlikely(x)) {		\
			gfs2_dump_glock(NULL, gl, true);	\
			BUG(); } } while(0)
/* Dump the glock and warn when the asserted condition x is NOT true
 * (note the inverted polarity relative to GLOCK_BUG_ON). */
#define gfs2_glock_assert_warn(gl, x) do { if (unlikely(!(x))) {	\
			gfs2_dump_glock(NULL, gl, true);		\
			gfs2_assert_warn((gl)->gl_name.ln_sbd, (x)); } } \
	while (0)
/* Dump the glock and withdraw the fs when the asserted condition x is
 * NOT true. */
#define gfs2_glock_assert_withdraw(gl, x) do { if (unlikely(!(x))) {	\
			gfs2_dump_glock(NULL, gl, true);		\
			gfs2_assert_withdraw((gl)->gl_name.ln_sbd, (x)); } } \
	while (0)
0241
0242 extern __printf(2, 3)
0243 void gfs2_print_dbg(struct seq_file *seq, const char *fmt, ...);
0244
0245
0246
0247
0248
0249
0250
0251
0252
0253
0254
0255 static inline int gfs2_glock_nq_init(struct gfs2_glock *gl,
0256 unsigned int state, u16 flags,
0257 struct gfs2_holder *gh)
0258 {
0259 int error;
0260
0261 __gfs2_holder_init(gl, state, flags, gh, _RET_IP_);
0262
0263 error = gfs2_glock_nq(gh);
0264 if (error)
0265 gfs2_holder_uninit(gh);
0266
0267 return error;
0268 }
0269
0270 extern void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state);
0271 extern void gfs2_glock_complete(struct gfs2_glock *gl, int ret);
0272 extern bool gfs2_queue_delete_work(struct gfs2_glock *gl, unsigned long delay);
0273 extern void gfs2_cancel_delete_work(struct gfs2_glock *gl);
0274 extern bool gfs2_delete_work_queued(const struct gfs2_glock *gl);
0275 extern void gfs2_flush_delete_work(struct gfs2_sbd *sdp);
0276 extern void gfs2_gl_hash_clear(struct gfs2_sbd *sdp);
0277 extern void gfs2_glock_thaw(struct gfs2_sbd *sdp);
0278 extern void gfs2_glock_add_to_lru(struct gfs2_glock *gl);
0279 extern void gfs2_glock_free(struct gfs2_glock *gl);
0280
0281 extern int __init gfs2_glock_init(void);
0282 extern void gfs2_glock_exit(void);
0283
0284 extern void gfs2_create_debugfs_file(struct gfs2_sbd *sdp);
0285 extern void gfs2_delete_debugfs_file(struct gfs2_sbd *sdp);
0286 extern void gfs2_register_debugfs(void);
0287 extern void gfs2_unregister_debugfs(void);
0288
0289 extern const struct lm_lockops gfs2_dlm_ops;
0290
/*
 * Mark a holder as not attached to any glock (the counterpart of
 * gfs2_holder_initialized(), which tests gh_gl).
 */
static inline void gfs2_holder_mark_uninitialized(struct gfs2_holder *gh)
{
	gh->gh_gl = NULL;
}
0295
0296 static inline bool gfs2_holder_initialized(struct gfs2_holder *gh)
0297 {
0298 return gh->gh_gl;
0299 }
0300
/* Returns true if the holder is currently on a glock's holder list. */
static inline bool gfs2_holder_queued(struct gfs2_holder *gh)
{
	return !list_empty(&gh->gh_list);
}
0305
0306
0307
0308
0309
0310
/*
 * glock_set_object - attach the protected object to a glock
 *
 * Sets gl_object under the glock spinlock. If an object is already
 * attached, warns and dumps the glock (but still overwrites the
 * pointer) — a non-NULL gl_object here indicates a lifecycle bug.
 */
static inline void glock_set_object(struct gfs2_glock *gl, void *object)
{
	spin_lock(&gl->gl_lockref.lock);
	if (gfs2_assert_warn(gl->gl_name.ln_sbd, gl->gl_object == NULL))
		gfs2_dump_glock(NULL, gl, true);
	gl->gl_object = object;
	spin_unlock(&gl->gl_lockref.lock);
}
0319
0320
0321
0322
0323
0324
0325
0326
0327
0328
0329
0330
0331
0332
0333
0334
0335
0336
0337
/*
 * glock_clear_object - detach the protected object from a glock
 *
 * Clears gl_object under the glock spinlock, but only if it still
 * points at @object — another object may legitimately have been
 * attached in the meantime, in which case this is a no-op.
 */
static inline void glock_clear_object(struct gfs2_glock *gl, void *object)
{
	spin_lock(&gl->gl_lockref.lock);
	if (gl->gl_object == object)
		gl->gl_object = NULL;
	spin_unlock(&gl->gl_lockref.lock);
}
0345
/*
 * Mark the holder as demotable (HIF_MAY_DEMOTE) under the glock
 * spinlock; such holders are e.g. skipped by
 * gfs2_glock_is_locked_by_me().
 */
static inline void gfs2_holder_allow_demote(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;

	spin_lock(&gl->gl_lockref.lock);
	set_bit(HIF_MAY_DEMOTE, &gh->gh_iflags);
	spin_unlock(&gl->gl_lockref.lock);
}
0354
/*
 * Clear the holder's HIF_MAY_DEMOTE flag under the glock spinlock,
 * undoing gfs2_holder_allow_demote().
 */
static inline void gfs2_holder_disallow_demote(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;

	spin_lock(&gl->gl_lockref.lock);
	clear_bit(HIF_MAY_DEMOTE, &gh->gh_iflags);
	spin_unlock(&gl->gl_lockref.lock);
}
0363
0364 extern void gfs2_inode_remember_delete(struct gfs2_glock *gl, u64 generation);
0365 extern bool gfs2_inode_already_deleted(struct gfs2_glock *gl, u64 generation);
0366
0367 #endif