/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2008 Oracle.  All rights reserved.
 */

#ifndef BTRFS_LOCKING_H
#define BTRFS_LOCKING_H

#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/percpu_counter.h>
#include "extent_io.h"

#define BTRFS_WRITE_LOCK 1
#define BTRFS_READ_LOCK 2

/*
 * We are limited in the number of subclasses by MAX_LOCKDEP_SUBCLASSES, which
 * at the time of this writing is 8, exactly as many as we use.  Keep this in
 * mind if you decide you want to add another subclass.
 */
enum btrfs_lock_nesting {
    BTRFS_NESTING_NORMAL,

    /*
     * When we COW a block we are holding the lock on the original block,
     * and since our lockdep maps are rootid+level, this confuses lockdep
     * when we lock the newly allocated COW'd block.  Handle this by having
     * a subclass for COW'ed blocks so that lockdep doesn't complain.
     */
    BTRFS_NESTING_COW,

    /*
     * Oftentimes we need to lock adjacent nodes on the same level while
     * still holding the lock on the original node we searched to, such as
     * for searching forward or for split/balance.
     *
     * Because of this we need to indicate to lockdep that this is
     * acceptable by having a different subclass for each of these
     * operations.
     */
    BTRFS_NESTING_LEFT,
    BTRFS_NESTING_RIGHT,

    /*
     * When splitting we will be holding a lock on the left/right node when
     * we need to cow that node, thus we need a new set of subclasses for
     * these two operations.
     */
    BTRFS_NESTING_LEFT_COW,
    BTRFS_NESTING_RIGHT_COW,

    /*
     * When splitting we may push nodes to the left or right, but still use
     * the subsequent nodes in our path, keeping our locks on those adjacent
     * blocks.  Thus when we go to allocate a new split block we've already
     * used up all of our available subclasses, so this subclass exists to
     * handle this case where we need to allocate a new split block.
     */
    BTRFS_NESTING_SPLIT,

    /*
     * When promoting a new block to a root we need to have a special
     * subclass so we don't confuse lockdep, as it will appear that we are
     * locking a higher level node before a lower level one.  Copying also
     * has this problem as it appears we're locking the same block again
     * when we make a snapshot of an existing root.
     */
    BTRFS_NESTING_NEW_ROOT,

    /*
     * We are limited to MAX_LOCKDEP_SUBCLASSES subclasses, so add this in
     * here and add a static_assert to keep us from going over the limit.
     * As of this writing we're limited to 8, and we're definitely using
     * all 8, hence this check to keep us from messing up in the future.
     */
    BTRFS_NESTING_MAX,
};

static_assert(BTRFS_NESTING_MAX <= MAX_LOCKDEP_SUBCLASSES,
              "too many lock subclasses defined");

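/*
 * Usage sketch (illustrative, not taken from the kernel sources; 'eb'
 * and 'cow' are hypothetical extent buffers): locking a freshly COW'ed
 * block with a dedicated subclass while the source block is still held,
 * so lockdep does not report a same-class recursion.
 *
 *   btrfs_tree_lock(eb);                        // default subclass
 *   __btrfs_tree_lock(cow, BTRFS_NESTING_COW);  // COW subclass, no complaint
 *   ...
 *   btrfs_tree_unlock(cow);
 *   btrfs_tree_unlock(eb);
 */
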
struct btrfs_path;

void __btrfs_tree_lock(struct extent_buffer *eb, enum btrfs_lock_nesting nest);
void btrfs_tree_lock(struct extent_buffer *eb);
void btrfs_tree_unlock(struct extent_buffer *eb);

void __btrfs_tree_read_lock(struct extent_buffer *eb, enum btrfs_lock_nesting nest);
void btrfs_tree_read_lock(struct extent_buffer *eb);
void btrfs_tree_read_unlock(struct extent_buffer *eb);
int btrfs_try_tree_read_lock(struct extent_buffer *eb);
int btrfs_try_tree_write_lock(struct extent_buffer *eb);
struct extent_buffer *btrfs_lock_root_node(struct btrfs_root *root);
struct extent_buffer *btrfs_read_lock_root_node(struct btrfs_root *root);

#ifdef CONFIG_BTRFS_DEBUG
static inline void btrfs_assert_tree_write_locked(struct extent_buffer *eb)
{
    lockdep_assert_held_write(&eb->lock);
}
#else
static inline void btrfs_assert_tree_write_locked(struct extent_buffer *eb) { }
#endif

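/* Unlock any still-locked nodes in @path at @level and above. */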
void btrfs_unlock_up_safe(struct btrfs_path *path, int level);

static inline void btrfs_tree_unlock_rw(struct extent_buffer *eb, int rw)
{
    if (rw == BTRFS_WRITE_LOCK)
        btrfs_tree_unlock(eb);
    else if (rw == BTRFS_READ_LOCK)
        btrfs_tree_read_unlock(eb);
    else
        BUG();
}
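
/*
 * Usage sketch (illustrative, not taken from the kernel sources; 'eb'
 * is a hypothetical extent buffer): the try-lock helpers return nonzero
 * on success, so callers can record which BTRFS_*_LOCK token they hold
 * and release it through btrfs_tree_unlock_rw() above.
 *
 *   int rw = 0;
 *
 *   if (btrfs_try_tree_write_lock(eb))
 *       rw = BTRFS_WRITE_LOCK;
 *   else if (btrfs_try_tree_read_lock(eb))
 *       rw = BTRFS_READ_LOCK;
 *
 *   if (rw) {
 *       ...
 *       btrfs_tree_unlock_rw(eb, rw);
 *   }
 */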

struct btrfs_drew_lock {
    atomic_t readers;
    struct percpu_counter writers;
    wait_queue_head_t pending_writers;
    wait_queue_head_t pending_readers;
};

int btrfs_drew_lock_init(struct btrfs_drew_lock *lock);
void btrfs_drew_lock_destroy(struct btrfs_drew_lock *lock);
void btrfs_drew_write_lock(struct btrfs_drew_lock *lock);
bool btrfs_drew_try_write_lock(struct btrfs_drew_lock *lock);
void btrfs_drew_write_unlock(struct btrfs_drew_lock *lock);
void btrfs_drew_read_lock(struct btrfs_drew_lock *lock);
void btrfs_drew_read_unlock(struct btrfs_drew_lock *lock);

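/*
 * Usage sketch (illustrative, not taken from the kernel sources): a drew
 * (double reader-writer exclusion) lock provides A-B exclusion, so any
 * number of concurrent readers or any number of concurrent writers may
 * hold it, but never both kinds at once.  Error handling is omitted.
 *
 *   struct btrfs_drew_lock lock;
 *
 *   btrfs_drew_lock_init(&lock);      // returns 0 on success
 *
 *   btrfs_drew_read_lock(&lock);      // waits only for writers
 *   ...                               // other readers may run concurrently
 *   btrfs_drew_read_unlock(&lock);
 *
 *   if (btrfs_drew_try_write_lock(&lock)) {  // nonblocking, true on success
 *       ...
 *       btrfs_drew_write_unlock(&lock);
 *   }
 *
 *   btrfs_drew_lock_destroy(&lock);
 */
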
#ifdef CONFIG_DEBUG_LOCK_ALLOC
void btrfs_set_buffer_lockdep_class(u64 objectid, struct extent_buffer *eb, int level);
void btrfs_maybe_reset_lockdep_class(struct btrfs_root *root, struct extent_buffer *eb);
#else
static inline void btrfs_set_buffer_lockdep_class(u64 objectid,
                    struct extent_buffer *eb, int level)
{
}
static inline void btrfs_maybe_reset_lockdep_class(struct btrfs_root *root,
                           struct extent_buffer *eb)
{
}
#endif

#endif