bcachefs: Replace rcu_read_lock() with guards
The new guard(), scoped_guard() allow for more natural code.

Some of the uses with creative flow control have been left.

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
This commit is contained in:
parent 9cb49fbf73
commit 18dad454cd
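For context (this note is not part of the commit itself): guard() and
scoped_guard() come from the kernel's <linux/cleanup.h>, with the rcu guard
class defined alongside the RCU API. A guard takes the lock when it is
declared and releases it automatically on every path that leaves the
enclosing scope. A minimal sketch of the transformation applied throughout
this commit, modeled on the bucket_gen_get() hunk further down:

    /* Before: explicit pairing, easy to get wrong on early-return paths */
    static inline int bucket_gen_get(struct bch_dev *ca, size_t b)
    {
        rcu_read_lock();
        int ret = bucket_gen_get_rcu(ca, b);
        rcu_read_unlock();
        return ret;
    }

    /* After: rcu_read_unlock() runs automatically when the scope is left */
    static inline int bucket_gen_get(struct bch_dev *ca, size_t b)
    {
        guard(rcu)();
        return bucket_gen_get_rcu(ca, b);
    }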
@@ -1000,14 +1000,11 @@ int bch2_trigger_alloc(struct btree_trans *trans,
     }
 
     if (new_a->gen != old_a->gen) {
-        rcu_read_lock();
+        guard(rcu)();
         u8 *gen = bucket_gen(ca, new.k->p.offset);
-        if (unlikely(!gen)) {
-            rcu_read_unlock();
+        if (unlikely(!gen))
             goto invalid_bucket;
-        }
         *gen = new_a->gen;
-        rcu_read_unlock();
     }
 
 #define eval_state(_a, expr) ({ const struct bch_alloc_v4 *a = _a; expr; })
@@ -1033,15 +1030,12 @@ int bch2_trigger_alloc(struct btree_trans *trans,
     }
 
     if ((flags & BTREE_TRIGGER_gc) && (flags & BTREE_TRIGGER_insert)) {
-        rcu_read_lock();
+        guard(rcu)();
         struct bucket *g = gc_bucket(ca, new.k->p.offset);
-        if (unlikely(!g)) {
-            rcu_read_unlock();
+        if (unlikely(!g))
             goto invalid_bucket;
-        }
         g->gen_valid = 1;
         g->gen = new_a->gen;
-        rcu_read_unlock();
     }
 err:
 fsck_err:
@@ -1117,13 +1111,12 @@ static bool next_bucket(struct bch_fs *c, struct bch_dev **ca, struct bpos *bucket)
         bucket->offset = 0;
     }
 
-    rcu_read_lock();
+    guard(rcu)();
     *ca = __bch2_next_dev_idx(c, bucket->inode, NULL);
     if (*ca) {
         *bucket = POS((*ca)->dev_idx, (*ca)->mi.first_bucket);
         bch2_dev_get(*ca);
     }
-    rcu_read_unlock();
 
     return *ca != NULL;
 }
@@ -2514,7 +2507,7 @@ void bch2_recalc_capacity(struct bch_fs *c)
 
     lockdep_assert_held(&c->state_lock);
 
-    rcu_read_lock();
+    guard(rcu)();
     for_each_member_device_rcu(c, ca, NULL) {
         struct block_device *bdev = READ_ONCE(ca->disk_sb.bdev);
         if (bdev)
@@ -2559,7 +2552,6 @@ void bch2_recalc_capacity(struct bch_fs *c)
         bucket_size_max = max_t(unsigned, bucket_size_max,
                                 ca->mi.bucket_size);
     }
-    rcu_read_unlock();
 
     bch2_set_ra_pages(c, ra_pages);
 
@@ -2584,10 +2576,9 @@ u64 bch2_min_rw_member_capacity(struct bch_fs *c)
 {
     u64 ret = U64_MAX;
 
-    rcu_read_lock();
+    guard(rcu)();
     for_each_rw_member_rcu(c, ca)
         ret = min(ret, ca->mi.nbuckets * ca->mi.bucket_size);
-    rcu_read_unlock();
     return ret;
 }
 
@@ -13,11 +13,9 @@
 
 static inline bool bch2_dev_bucket_exists(struct bch_fs *c, struct bpos pos)
 {
-    rcu_read_lock();
+    guard(rcu)();
     struct bch_dev *ca = bch2_dev_rcu_noerror(c, pos.inode);
-    bool ret = ca && bucket_valid(ca, pos.offset);
-    rcu_read_unlock();
-    return ret;
+    return ca && bucket_valid(ca, pos.offset);
 }
 
 static inline u64 bucket_to_u64(struct bpos bucket)
@@ -69,10 +69,9 @@ const char * const bch2_watermarks[] = {
 
 void bch2_reset_alloc_cursors(struct bch_fs *c)
 {
-    rcu_read_lock();
+    guard(rcu)();
     for_each_member_device_rcu(c, ca, NULL)
         memset(ca->alloc_cursor, 0, sizeof(ca->alloc_cursor));
-    rcu_read_unlock();
 }
 
 static void bch2_open_bucket_hash_add(struct bch_fs *c, struct open_bucket *ob)
@@ -166,9 +165,8 @@ static void open_bucket_free_unused(struct bch_fs *c, struct open_bucket *ob)
              ARRAY_SIZE(c->open_buckets_partial));
 
     spin_lock(&c->freelist_lock);
-    rcu_read_lock();
-    bch2_dev_rcu(c, ob->dev)->nr_partial_buckets++;
-    rcu_read_unlock();
+    scoped_guard(rcu)
+        bch2_dev_rcu(c, ob->dev)->nr_partial_buckets++;
 
     ob->on_partial_list = true;
     c->open_buckets_partial[c->open_buckets_partial_nr++] =
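Where the critical section is narrower than the enclosing function, the patch
uses scoped_guard(), which holds the lock only across the statement or block
that follows it. A short sketch of the two forms, reusing the counter update
from the hunk above (the block form is only an illustration, not taken from
this commit):

    /* statement form: rcu_read_lock(); <stmt>; rcu_read_unlock(); */
    scoped_guard(rcu)
        bch2_dev_rcu(c, ob->dev)->nr_partial_buckets++;

    /* block form: the lock is dropped at the closing brace */
    scoped_guard(rcu) {
        struct bch_dev *ca = bch2_dev_rcu(c, ob->dev);
        ca->nr_partial_buckets++;
    }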
@@ -873,9 +871,8 @@ static int bucket_alloc_set_partial(struct bch_fs *c,
                           i);
             ob->on_partial_list = false;
 
-            rcu_read_lock();
-            bch2_dev_rcu(c, ob->dev)->nr_partial_buckets--;
-            rcu_read_unlock();
+            scoped_guard(rcu)
+                bch2_dev_rcu(c, ob->dev)->nr_partial_buckets--;
 
             ret = add_new_bucket(c, req, ob);
             if (ret)
@@ -1057,9 +1054,8 @@ void bch2_open_buckets_stop(struct bch_fs *c, struct bch_dev *ca,
 
             ob->on_partial_list = false;
 
-            rcu_read_lock();
-            bch2_dev_rcu(c, ob->dev)->nr_partial_buckets--;
-            rcu_read_unlock();
+            scoped_guard(rcu)
+                bch2_dev_rcu(c, ob->dev)->nr_partial_buckets--;
 
             spin_unlock(&c->freelist_lock);
             bch2_open_bucket_put(c, ob);
@@ -1087,14 +1083,11 @@ static struct write_point *__writepoint_find(struct hlist_head *head,
 {
     struct write_point *wp;
 
-    rcu_read_lock();
+    guard(rcu)();
     hlist_for_each_entry_rcu(wp, head, node)
         if (wp->write_point == write_point)
-            goto out;
-    wp = NULL;
-out:
-    rcu_read_unlock();
-    return wp;
+            return wp;
+    return NULL;
 }
 
 static inline bool too_many_writepoints(struct bch_fs *c, unsigned factor)
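The early "return wp;" above is safe because guard() declares a local object
whose cleanup handler performs the unlock; the compiler invokes that handler
on every path that leaves the scope. A stripped-down sketch of the mechanism
using the GCC/Clang cleanup attribute directly -- the names here are
illustrative, not the actual <linux/cleanup.h> macros:

    struct rcu_guard_sketch { int unused; };

    static inline void rcu_guard_sketch_exit(struct rcu_guard_sketch *g)
    {
        rcu_read_unlock();
    }

    /* Declares a scope-local object and takes the lock; any return, goto or
     * fallthrough out of the scope triggers rcu_guard_sketch_exit(). */
    #define RCU_GUARD_SKETCH()                                          \
        struct rcu_guard_sketch __g                                     \
            __attribute__((cleanup(rcu_guard_sketch_exit))) = {};      \
        rcu_read_lock()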
@@ -1638,19 +1631,16 @@ static noinline void bch2_print_allocator_stuck(struct bch_fs *c)
 
     bch2_printbuf_make_room(&buf, 4096);
 
-    rcu_read_lock();
     buf.atomic++;
 
-    for_each_online_member_rcu(c, ca) {
-        prt_printf(&buf, "Dev %u:\n", ca->dev_idx);
-        printbuf_indent_add(&buf, 2);
-        bch2_dev_alloc_debug_to_text(&buf, ca);
-        printbuf_indent_sub(&buf, 2);
-        prt_newline(&buf);
-    }
-
+    scoped_guard(rcu)
+        for_each_online_member_rcu(c, ca) {
+            prt_printf(&buf, "Dev %u:\n", ca->dev_idx);
+            printbuf_indent_add(&buf, 2);
+            bch2_dev_alloc_debug_to_text(&buf, ca);
+            printbuf_indent_sub(&buf, 2);
+            prt_newline(&buf);
+        }
     --buf.atomic;
-    rcu_read_unlock();
 
     prt_printf(&buf, "Copygc debug:\n");
     printbuf_indent_add(&buf, 2);
@ -48,18 +48,20 @@ void bch2_backpointer_to_text(struct printbuf *out, struct bch_fs *c, struct bke
|
|||
{
|
||||
struct bkey_s_c_backpointer bp = bkey_s_c_to_backpointer(k);
|
||||
|
||||
rcu_read_lock();
|
||||
struct bch_dev *ca = bch2_dev_rcu_noerror(c, bp.k->p.inode);
|
||||
if (ca) {
|
||||
u32 bucket_offset;
|
||||
struct bpos bucket = bp_pos_to_bucket_and_offset(ca, bp.k->p, &bucket_offset);
|
||||
rcu_read_unlock();
|
||||
prt_printf(out, "bucket=%llu:%llu:%u ", bucket.inode, bucket.offset, bucket_offset);
|
||||
} else {
|
||||
rcu_read_unlock();
|
||||
prt_printf(out, "sector=%llu:%llu ", bp.k->p.inode, bp.k->p.offset >> MAX_EXTENT_COMPRESS_RATIO_SHIFT);
|
||||
struct bch_dev *ca;
|
||||
u32 bucket_offset;
|
||||
struct bpos bucket;
|
||||
scoped_guard(rcu) {
|
||||
ca = bch2_dev_rcu_noerror(c, bp.k->p.inode);
|
||||
if (ca)
|
||||
bucket = bp_pos_to_bucket_and_offset(ca, bp.k->p, &bucket_offset);
|
||||
}
|
||||
|
||||
if (ca)
|
||||
prt_printf(out, "bucket=%llu:%llu:%u ", bucket.inode, bucket.offset, bucket_offset);
|
||||
else
|
||||
prt_printf(out, "sector=%llu:%llu ", bp.k->p.inode, bp.k->p.offset >> MAX_EXTENT_COMPRESS_RATIO_SHIFT);
|
||||
|
||||
bch2_btree_id_level_to_text(out, bp.v->btree_id, bp.v->level);
|
||||
prt_str(out, " data_type=");
|
||||
bch2_prt_data_type(out, bp.v->data_type);
|
||||
|
|
@ -591,6 +593,7 @@ check_existing_bp:
|
|||
bkey_for_each_ptr(other_extent_ptrs, ptr)
|
||||
if (ptr->dev == bp->k.p.inode &&
|
||||
dev_ptr_stale_rcu(ca, ptr)) {
|
||||
rcu_read_unlock();
|
||||
ret = drop_dev_and_update(trans, other_bp.v->btree_id,
|
||||
other_extent, bp->k.p.inode);
|
||||
if (ret)
|
||||
|
|
@@ -679,26 +682,23 @@ static int check_extent_to_backpointers(struct btree_trans *trans,
         if (p.ptr.dev == BCH_SB_MEMBER_INVALID)
             continue;
 
-        rcu_read_lock();
-        struct bch_dev *ca = bch2_dev_rcu_noerror(c, p.ptr.dev);
-        if (!ca) {
-            rcu_read_unlock();
-            continue;
-        }
+        bool empty;
+        {
+            /* scoped_guard() is a loop, so it breaks continue */
+            guard(rcu)();
+            struct bch_dev *ca = bch2_dev_rcu_noerror(c, p.ptr.dev);
+            if (!ca)
+                continue;
 
-        if (p.ptr.cached && dev_ptr_stale_rcu(ca, &p.ptr)) {
-            rcu_read_unlock();
-            continue;
-        }
+            if (p.ptr.cached && dev_ptr_stale_rcu(ca, &p.ptr))
+                continue;
 
-        u64 b = PTR_BUCKET_NR(ca, &p.ptr);
-        if (!bch2_bucket_bitmap_test(&ca->bucket_backpointer_mismatch, b)) {
-            rcu_read_unlock();
-            continue;
-        }
+            u64 b = PTR_BUCKET_NR(ca, &p.ptr);
+            if (!bch2_bucket_bitmap_test(&ca->bucket_backpointer_mismatch, b))
+                continue;
 
-        bool empty = bch2_bucket_bitmap_test(&ca->bucket_backpointer_empty, b);
-        rcu_read_unlock();
+            empty = bch2_bucket_bitmap_test(&ca->bucket_backpointer_empty, b);
+        }
 
         struct bkey_i_backpointer bp;
         bch2_extent_ptr_to_bp(c, btree, level, k, p, entry, &bp);
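The "/* scoped_guard() is a loop, so it breaks continue */" comment refers to
how scoped_guard() is constructed: it is a for loop that executes its body
exactly once so the guard's lifetime ends with the loop, which means break
and continue inside the body act on that hidden loop rather than on the
caller's loop. A simplified sketch of the shape (not the exact mainline
macro):

    #define scoped_guard_sketch(name, args...)                         \
        for (CLASS(name, scope)(args), *__done = NULL;                  \
             !__done; __done = (void *) 1)

    /* Inside scoped_guard_sketch(rcu) { ... continue; ... } the continue
     * advances the hidden __done loop, not the enclosing
     * bkey_for_each_ptr_decode() loop -- hence the plain { } block with
     * guard(rcu)() used in the hunk above, where continue still targets
     * the outer loop. */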
@ -981,7 +981,7 @@ static bool backpointer_node_has_missing(struct bch_fs *c, struct bkey_s_c k)
|
|||
case KEY_TYPE_btree_ptr_v2: {
|
||||
bool ret = false;
|
||||
|
||||
rcu_read_lock();
|
||||
guard(rcu)();
|
||||
struct bpos pos = bkey_s_c_to_btree_ptr_v2(k).v->min_key;
|
||||
while (pos.inode <= k.k->p.inode) {
|
||||
if (pos.inode >= c->sb.nr_devices)
|
||||
|
|
@ -1009,7 +1009,6 @@ static bool backpointer_node_has_missing(struct bch_fs *c, struct bkey_s_c k)
|
|||
next:
|
||||
pos = SPOS(pos.inode + 1, 0, 0);
|
||||
}
|
||||
rcu_read_unlock();
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
|
|
|||
|
|
@ -53,11 +53,10 @@ static inline struct bpos bp_pos_to_bucket_and_offset(const struct bch_dev *ca,
|
|||
|
||||
static inline bool bp_pos_to_bucket_nodev_noerror(struct bch_fs *c, struct bpos bp_pos, struct bpos *bucket)
|
||||
{
|
||||
rcu_read_lock();
|
||||
guard(rcu)();
|
||||
struct bch_dev *ca = bch2_dev_rcu_noerror(c, bp_pos.inode);
|
||||
if (ca)
|
||||
*bucket = bp_pos_to_bucket(ca, bp_pos);
|
||||
rcu_read_unlock();
|
||||
return ca != NULL;
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -1093,42 +1093,41 @@ static int gc_btree_gens_key(struct btree_trans *trans,
|
|||
{
|
||||
struct bch_fs *c = trans->c;
|
||||
struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
|
||||
struct bkey_i *u;
|
||||
int ret;
|
||||
|
||||
if (unlikely(test_bit(BCH_FS_going_ro, &c->flags)))
|
||||
return -EROFS;
|
||||
|
||||
rcu_read_lock();
|
||||
bkey_for_each_ptr(ptrs, ptr) {
|
||||
struct bch_dev *ca = bch2_dev_rcu(c, ptr->dev);
|
||||
if (!ca)
|
||||
continue;
|
||||
bool too_stale = false;
|
||||
scoped_guard(rcu) {
|
||||
bkey_for_each_ptr(ptrs, ptr) {
|
||||
struct bch_dev *ca = bch2_dev_rcu(c, ptr->dev);
|
||||
if (!ca)
|
||||
continue;
|
||||
|
||||
if (dev_ptr_stale(ca, ptr) > 16) {
|
||||
rcu_read_unlock();
|
||||
goto update;
|
||||
too_stale |= dev_ptr_stale(ca, ptr) > 16;
|
||||
}
|
||||
|
||||
if (!too_stale)
|
||||
bkey_for_each_ptr(ptrs, ptr) {
|
||||
struct bch_dev *ca = bch2_dev_rcu(c, ptr->dev);
|
||||
if (!ca)
|
||||
continue;
|
||||
|
||||
u8 *gen = &ca->oldest_gen[PTR_BUCKET_NR(ca, ptr)];
|
||||
if (gen_after(*gen, ptr->gen))
|
||||
*gen = ptr->gen;
|
||||
}
|
||||
}
|
||||
|
||||
bkey_for_each_ptr(ptrs, ptr) {
|
||||
struct bch_dev *ca = bch2_dev_rcu(c, ptr->dev);
|
||||
if (!ca)
|
||||
continue;
|
||||
if (too_stale) {
|
||||
struct bkey_i *u = bch2_bkey_make_mut(trans, iter, &k, 0);
|
||||
int ret = PTR_ERR_OR_ZERO(u);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
u8 *gen = &ca->oldest_gen[PTR_BUCKET_NR(ca, ptr)];
|
||||
if (gen_after(*gen, ptr->gen))
|
||||
*gen = ptr->gen;
|
||||
bch2_extent_normalize(c, bkey_i_to_s(u));
|
||||
}
|
||||
rcu_read_unlock();
|
||||
return 0;
|
||||
update:
|
||||
u = bch2_bkey_make_mut(trans, iter, &k, 0);
|
||||
ret = PTR_ERR_OR_ZERO(u);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
bch2_extent_normalize(c, bkey_i_to_s(u));
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -1325,14 +1325,13 @@ int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca,
|
|||
|
||||
btree_node_reset_sib_u64s(b);
|
||||
|
||||
rcu_read_lock();
|
||||
bkey_for_each_ptr(bch2_bkey_ptrs(bkey_i_to_s(&b->key)), ptr) {
|
||||
struct bch_dev *ca2 = bch2_dev_rcu(c, ptr->dev);
|
||||
scoped_guard(rcu)
|
||||
bkey_for_each_ptr(bch2_bkey_ptrs(bkey_i_to_s(&b->key)), ptr) {
|
||||
struct bch_dev *ca2 = bch2_dev_rcu(c, ptr->dev);
|
||||
|
||||
if (!ca2 || ca2->mi.state != BCH_MEMBER_STATE_rw)
|
||||
set_btree_node_need_rewrite(b);
|
||||
}
|
||||
rcu_read_unlock();
|
||||
if (!ca2 || ca2->mi.state != BCH_MEMBER_STATE_rw)
|
||||
set_btree_node_need_rewrite(b);
|
||||
}
|
||||
|
||||
if (!ptr_written)
|
||||
set_btree_node_need_rewrite(b);
|
||||
|
|
|
|||
|
|
@ -3568,13 +3568,12 @@ bch2_btree_bkey_cached_common_to_text(struct printbuf *out,
|
|||
struct btree_bkey_cached_common *b)
|
||||
{
|
||||
struct six_lock_count c = six_lock_counts(&b->lock);
|
||||
struct task_struct *owner;
|
||||
pid_t pid;
|
||||
|
||||
rcu_read_lock();
|
||||
owner = READ_ONCE(b->lock.owner);
|
||||
pid = owner ? owner->pid : 0;
|
||||
rcu_read_unlock();
|
||||
scoped_guard(rcu) {
|
||||
struct task_struct *owner = READ_ONCE(b->lock.owner);
|
||||
pid = owner ? owner->pid : 0;
|
||||
}
|
||||
|
||||
prt_printf(out, "\t%px %c ", b, b->cached ? 'c' : 'b');
|
||||
bch2_btree_id_to_text(out, b->btree_id);
|
||||
|
|
@ -3603,7 +3602,7 @@ void bch2_btree_trans_to_text(struct printbuf *out, struct btree_trans *trans)
|
|||
prt_printf(out, "%i %s\n", task ? task->pid : 0, trans->fn);
|
||||
|
||||
/* trans->paths is rcu protected vs. freeing */
|
||||
rcu_read_lock();
|
||||
guard(rcu)();
|
||||
out->atomic++;
|
||||
|
||||
struct btree_path *paths = rcu_dereference(trans->paths);
|
||||
|
|
@ -3646,7 +3645,6 @@ void bch2_btree_trans_to_text(struct printbuf *out, struct btree_trans *trans)
|
|||
}
|
||||
out:
|
||||
--out->atomic;
|
||||
rcu_read_unlock();
|
||||
}
|
||||
|
||||
void bch2_fs_btree_iter_exit(struct bch_fs *c)
|
||||
|
|
|
|||
|
|
@ -457,11 +457,9 @@ static void bch2_journal_iter_advance(struct journal_iter *iter)
|
|||
|
||||
static struct bkey_s_c bch2_journal_iter_peek(struct journal_iter *iter)
|
||||
{
|
||||
struct bkey_s_c ret = bkey_s_c_null;
|
||||
|
||||
journal_iter_verify(iter);
|
||||
|
||||
rcu_read_lock();
|
||||
guard(rcu)();
|
||||
while (iter->idx < iter->keys->size) {
|
||||
struct journal_key *k = iter->keys->data + iter->idx;
|
||||
|
||||
|
|
@ -470,19 +468,16 @@ static struct bkey_s_c bch2_journal_iter_peek(struct journal_iter *iter)
|
|||
break;
|
||||
BUG_ON(cmp);
|
||||
|
||||
if (!k->overwritten) {
|
||||
ret = bkey_i_to_s_c(k->k);
|
||||
break;
|
||||
}
|
||||
if (!k->overwritten)
|
||||
return bkey_i_to_s_c(k->k);
|
||||
|
||||
if (k->overwritten_range)
|
||||
iter->idx = idx_to_pos(iter->keys, rcu_dereference(k->overwritten_range)->end);
|
||||
else
|
||||
bch2_journal_iter_advance(iter);
|
||||
}
|
||||
rcu_read_unlock();
|
||||
|
||||
return ret;
|
||||
return bkey_s_c_null;
|
||||
}
|
||||
|
||||
static void bch2_journal_iter_exit(struct journal_iter *iter)
|
||||
|
|
|
|||
|
|
@@ -187,27 +187,23 @@ lock:
 static struct bkey_cached *
 bkey_cached_reuse(struct btree_key_cache *c)
 {
-    struct bucket_table *tbl;
+    guard(rcu)();
+    struct bucket_table *tbl = rht_dereference_rcu(c->table.tbl, &c->table);
     struct rhash_head *pos;
     struct bkey_cached *ck;
-    unsigned i;
 
-    rcu_read_lock();
-    tbl = rht_dereference_rcu(c->table.tbl, &c->table);
-    for (i = 0; i < tbl->size; i++)
+    for (unsigned i = 0; i < tbl->size; i++)
         rht_for_each_entry_rcu(ck, pos, tbl, i, hash) {
             if (!test_bit(BKEY_CACHED_DIRTY, &ck->flags) &&
                 bkey_cached_lock_for_evict(ck)) {
                 if (bkey_cached_evict(c, ck))
-                    goto out;
+                    return ck;
                 six_unlock_write(&ck->c.lock);
                 six_unlock_intent(&ck->c.lock);
             }
         }
-    ck = NULL;
-out:
-    rcu_read_unlock();
-    return ck;
+    return NULL;
 }
 
 static int btree_key_cache_create(struct btree_trans *trans,
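Because the unlock is tied to scope exit, the direct "return ck;" from inside
the nested rht_for_each_entry_rcu() walk still drops the RCU read lock, so
the old "out:" label and trailing rcu_read_unlock() can go away. A condensed
sketch of that control flow, with a hypothetical helper standing in for the
bucket walk and eviction test:

    static struct bkey_cached *reuse_sketch(struct rhashtable *ht)
    {
        guard(rcu)();        /* held for the remainder of the function */

        struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht);

        for (unsigned i = 0; i < tbl->size; i++) {
            struct bkey_cached *ck = try_evict_from_bucket(tbl, i); /* hypothetical */

            if (ck)
                return ck;    /* rcu_read_unlock() runs here */
        }
        return NULL;          /* ...and here */
    }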
|
|
|||
|
|
@ -311,7 +311,7 @@ int bch2_check_for_deadlock(struct btree_trans *trans, struct printbuf *cycle)
|
|||
lock_graph_down(&g, trans);
|
||||
|
||||
/* trans->paths is rcu protected vs. freeing */
|
||||
rcu_read_lock();
|
||||
guard(rcu)();
|
||||
if (cycle)
|
||||
cycle->atomic++;
|
||||
next:
|
||||
|
|
@ -409,7 +409,6 @@ up:
|
|||
out:
|
||||
if (cycle)
|
||||
--cycle->atomic;
|
||||
rcu_read_unlock();
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -299,9 +299,8 @@ int bch2_check_fix_ptrs(struct btree_trans *trans,
|
|||
if (ret)
|
||||
goto err;
|
||||
|
||||
rcu_read_lock();
|
||||
bch2_bkey_drop_ptrs(bkey_i_to_s(new), ptr, !bch2_dev_exists(c, ptr->dev));
|
||||
rcu_read_unlock();
|
||||
scoped_guard(rcu)
|
||||
bch2_bkey_drop_ptrs(bkey_i_to_s(new), ptr, !bch2_dev_exists(c, ptr->dev));
|
||||
|
||||
if (level) {
|
||||
/*
|
||||
|
|
@ -310,14 +309,11 @@ int bch2_check_fix_ptrs(struct btree_trans *trans,
|
|||
* sort it out:
|
||||
*/
|
||||
struct bkey_ptrs ptrs = bch2_bkey_ptrs(bkey_i_to_s(new));
|
||||
rcu_read_lock();
|
||||
bkey_for_each_ptr(ptrs, ptr) {
|
||||
struct bch_dev *ca = bch2_dev_rcu(c, ptr->dev);
|
||||
struct bucket *g = PTR_GC_BUCKET(ca, ptr);
|
||||
|
||||
ptr->gen = g->gen;
|
||||
}
|
||||
rcu_read_unlock();
|
||||
scoped_guard(rcu)
|
||||
bkey_for_each_ptr(ptrs, ptr) {
|
||||
struct bch_dev *ca = bch2_dev_rcu(c, ptr->dev);
|
||||
ptr->gen = PTR_GC_BUCKET(ca, ptr)->gen;
|
||||
}
|
||||
} else {
|
||||
struct bkey_ptrs ptrs;
|
||||
union bch_extent_entry *entry;
|
||||
|
|
|
|||
|
|
@ -84,10 +84,8 @@ static inline int bucket_gen_get_rcu(struct bch_dev *ca, size_t b)
|
|||
|
||||
static inline int bucket_gen_get(struct bch_dev *ca, size_t b)
|
||||
{
|
||||
rcu_read_lock();
|
||||
int ret = bucket_gen_get_rcu(ca, b);
|
||||
rcu_read_unlock();
|
||||
return ret;
|
||||
guard(rcu)();
|
||||
return bucket_gen_get_rcu(ca, b);
|
||||
}
|
||||
|
||||
static inline size_t PTR_BUCKET_NR(const struct bch_dev *ca,
|
||||
|
|
@ -156,10 +154,8 @@ static inline int dev_ptr_stale_rcu(struct bch_dev *ca, const struct bch_extent_
|
|||
*/
|
||||
static inline int dev_ptr_stale(struct bch_dev *ca, const struct bch_extent_ptr *ptr)
|
||||
{
|
||||
rcu_read_lock();
|
||||
int ret = dev_ptr_stale_rcu(ca, ptr);
|
||||
rcu_read_unlock();
|
||||
return ret;
|
||||
guard(rcu)();
|
||||
return dev_ptr_stale_rcu(ca, ptr);
|
||||
}
|
||||
|
||||
/* Device usage: */
|
||||
|
|
|
|||
|
|
@ -613,13 +613,10 @@ static long bch2_ioctl_disk_get_idx(struct bch_fs *c,
|
|||
if (!dev)
|
||||
return -EINVAL;
|
||||
|
||||
rcu_read_lock();
|
||||
guard(rcu)();
|
||||
for_each_online_member_rcu(c, ca)
|
||||
if (ca->dev == dev) {
|
||||
rcu_read_unlock();
|
||||
if (ca->dev == dev)
|
||||
return ca->dev_idx;
|
||||
}
|
||||
rcu_read_unlock();
|
||||
|
||||
return -BCH_ERR_ENOENT_dev_idx_not_found;
|
||||
}
|
||||
|
|
|
|||
|
|
@ -376,21 +376,21 @@ restart_drop_conflicting_replicas:
|
|||
bch2_bkey_durability(c, bkey_i_to_s_c(&new->k_i));
|
||||
|
||||
/* Now, drop excess replicas: */
|
||||
rcu_read_lock();
|
||||
scoped_guard(rcu) {
|
||||
restart_drop_extra_replicas:
|
||||
bkey_for_each_ptr_decode(old.k, bch2_bkey_ptrs(bkey_i_to_s(insert)), p, entry) {
|
||||
unsigned ptr_durability = bch2_extent_ptr_durability(c, &p);
|
||||
bkey_for_each_ptr_decode(old.k, bch2_bkey_ptrs(bkey_i_to_s(insert)), p, entry) {
|
||||
unsigned ptr_durability = bch2_extent_ptr_durability(c, &p);
|
||||
|
||||
if (!p.ptr.cached &&
|
||||
durability - ptr_durability >= m->op.opts.data_replicas) {
|
||||
durability -= ptr_durability;
|
||||
if (!p.ptr.cached &&
|
||||
durability - ptr_durability >= m->op.opts.data_replicas) {
|
||||
durability -= ptr_durability;
|
||||
|
||||
bch2_extent_ptr_set_cached(c, &m->op.opts,
|
||||
bkey_i_to_s(insert), &entry->ptr);
|
||||
goto restart_drop_extra_replicas;
|
||||
bch2_extent_ptr_set_cached(c, &m->op.opts,
|
||||
bkey_i_to_s(insert), &entry->ptr);
|
||||
goto restart_drop_extra_replicas;
|
||||
}
|
||||
}
|
||||
}
|
||||
rcu_read_unlock();
|
||||
|
||||
/* Finally, add the pointers we just wrote: */
|
||||
extent_for_each_ptr_decode(extent_i_to_s(new), p, entry)
|
||||
|
|
@ -782,7 +782,8 @@ static int can_write_extent(struct bch_fs *c, struct data_update *m)
|
|||
darray_for_each(m->op.devs_have, i)
|
||||
__clear_bit(*i, devs.d);
|
||||
|
||||
rcu_read_lock();
|
||||
guard(rcu)();
|
||||
|
||||
unsigned nr_replicas = 0, i;
|
||||
for_each_set_bit(i, devs.d, BCH_SB_MEMBERS_MAX) {
|
||||
struct bch_dev *ca = bch2_dev_rcu_noerror(c, i);
|
||||
|
|
@ -799,7 +800,6 @@ static int can_write_extent(struct bch_fs *c, struct data_update *m)
|
|||
if (nr_replicas >= m->op.nr_replicas)
|
||||
break;
|
||||
}
|
||||
rcu_read_unlock();
|
||||
|
||||
if (!nr_replicas)
|
||||
return -BCH_ERR_data_update_done_no_rw_devs;
|
||||
|
|
@ -876,7 +876,7 @@ int bch2_data_update_init(struct btree_trans *trans,
|
|||
unsigned ptr_bit = 1;
|
||||
bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
|
||||
if (!p.ptr.cached) {
|
||||
rcu_read_lock();
|
||||
guard(rcu)();
|
||||
if (ptr_bit & m->data_opts.rewrite_ptrs) {
|
||||
if (crc_is_compressed(p.crc))
|
||||
reserve_sectors += k.k->size;
|
||||
|
|
@ -887,7 +887,6 @@ int bch2_data_update_init(struct btree_trans *trans,
|
|||
bch2_dev_list_add_dev(&m->op.devs_have, p.ptr.dev);
|
||||
durability_have += bch2_extent_ptr_durability(c, &p);
|
||||
}
|
||||
rcu_read_unlock();
|
||||
}
|
||||
|
||||
/*
|
||||
|
|
|
|||
|
|
@ -510,27 +510,27 @@ static ssize_t bch2_cached_btree_nodes_read(struct file *file, char __user *buf,
|
|||
i->ret = 0;
|
||||
|
||||
do {
|
||||
struct bucket_table *tbl;
|
||||
struct rhash_head *pos;
|
||||
struct btree *b;
|
||||
|
||||
ret = bch2_debugfs_flush_buf(i);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
rcu_read_lock();
|
||||
i->buf.atomic++;
|
||||
tbl = rht_dereference_rcu(c->btree_cache.table.tbl,
|
||||
&c->btree_cache.table);
|
||||
if (i->iter < tbl->size) {
|
||||
rht_for_each_entry_rcu(b, pos, tbl, i->iter, hash)
|
||||
bch2_cached_btree_node_to_text(&i->buf, c, b);
|
||||
i->iter++;
|
||||
} else {
|
||||
done = true;
|
||||
scoped_guard(rcu) {
|
||||
struct bucket_table *tbl =
|
||||
rht_dereference_rcu(c->btree_cache.table.tbl,
|
||||
&c->btree_cache.table);
|
||||
if (i->iter < tbl->size) {
|
||||
struct rhash_head *pos;
|
||||
struct btree *b;
|
||||
|
||||
rht_for_each_entry_rcu(b, pos, tbl, i->iter, hash)
|
||||
bch2_cached_btree_node_to_text(&i->buf, c, b);
|
||||
i->iter++;
|
||||
} else {
|
||||
done = true;
|
||||
}
|
||||
}
|
||||
--i->buf.atomic;
|
||||
rcu_read_unlock();
|
||||
} while (!done);
|
||||
|
||||
if (i->buf.allocation_failure)
|
||||
|
|
|
|||
|
|
@ -897,8 +897,8 @@ int bch2_accounting_read(struct bch_fs *c)
|
|||
case BCH_DISK_ACCOUNTING_replicas:
|
||||
fs_usage_data_type_to_base(usage, k.replicas.data_type, v[0]);
|
||||
break;
|
||||
case BCH_DISK_ACCOUNTING_dev_data_type:
|
||||
rcu_read_lock();
|
||||
case BCH_DISK_ACCOUNTING_dev_data_type: {
|
||||
guard(rcu)();
|
||||
struct bch_dev *ca = bch2_dev_rcu_noerror(c, k.dev_data_type.dev);
|
||||
if (ca) {
|
||||
struct bch_dev_usage_type __percpu *d = &ca->usage->d[k.dev_data_type.data_type];
|
||||
|
|
@ -910,9 +910,9 @@ int bch2_accounting_read(struct bch_fs *c)
|
|||
k.dev_data_type.data_type == BCH_DATA_journal)
|
||||
usage->hidden += v[0] * ca->mi.bucket_size;
|
||||
}
|
||||
rcu_read_unlock();
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
preempt_enable();
|
||||
fsck_err:
|
||||
|
|
@ -1006,18 +1006,17 @@ void bch2_verify_accounting_clean(struct bch_fs *c)
|
|||
case BCH_DISK_ACCOUNTING_replicas:
|
||||
fs_usage_data_type_to_base(&base, acc_k.replicas.data_type, a.v->d[0]);
|
||||
break;
|
||||
case BCH_DISK_ACCOUNTING_dev_data_type: {
|
||||
rcu_read_lock();
|
||||
struct bch_dev *ca = bch2_dev_rcu_noerror(c, acc_k.dev_data_type.dev);
|
||||
if (!ca) {
|
||||
rcu_read_unlock();
|
||||
continue;
|
||||
}
|
||||
case BCH_DISK_ACCOUNTING_dev_data_type:
|
||||
{
|
||||
guard(rcu)(); /* scoped guard is a loop, and doesn't play nicely with continue */
|
||||
struct bch_dev *ca = bch2_dev_rcu_noerror(c, acc_k.dev_data_type.dev);
|
||||
if (!ca)
|
||||
continue;
|
||||
|
||||
v[0] = percpu_u64_get(&ca->usage->d[acc_k.dev_data_type.data_type].buckets);
|
||||
v[1] = percpu_u64_get(&ca->usage->d[acc_k.dev_data_type.data_type].sectors);
|
||||
v[2] = percpu_u64_get(&ca->usage->d[acc_k.dev_data_type.data_type].fragmented);
|
||||
rcu_read_unlock();
|
||||
v[0] = percpu_u64_get(&ca->usage->d[acc_k.dev_data_type.data_type].buckets);
|
||||
v[1] = percpu_u64_get(&ca->usage->d[acc_k.dev_data_type.data_type].sectors);
|
||||
v[2] = percpu_u64_get(&ca->usage->d[acc_k.dev_data_type.data_type].fragmented);
|
||||
}
|
||||
|
||||
if (memcmp(a.v->d, v, 3 * sizeof(u64))) {
|
||||
struct printbuf buf = PRINTBUF;
|
||||
|
|
@ -1032,7 +1031,6 @@ void bch2_verify_accounting_clean(struct bch_fs *c)
|
|||
mismatch = true;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
0;
|
||||
})));
|
||||
|
|
|
|||
|
|
@ -174,17 +174,17 @@ static inline int bch2_accounting_mem_mod_locked(struct btree_trans *trans,
|
|||
case BCH_DISK_ACCOUNTING_replicas:
|
||||
fs_usage_data_type_to_base(&trans->fs_usage_delta, acc_k.replicas.data_type, a.v->d[0]);
|
||||
break;
|
||||
case BCH_DISK_ACCOUNTING_dev_data_type:
|
||||
rcu_read_lock();
|
||||
case BCH_DISK_ACCOUNTING_dev_data_type: {
|
||||
guard(rcu)();
|
||||
struct bch_dev *ca = bch2_dev_rcu_noerror(c, acc_k.dev_data_type.dev);
|
||||
if (ca) {
|
||||
this_cpu_add(ca->usage->d[acc_k.dev_data_type.data_type].buckets, a.v->d[0]);
|
||||
this_cpu_add(ca->usage->d[acc_k.dev_data_type.data_type].sectors, a.v->d[1]);
|
||||
this_cpu_add(ca->usage->d[acc_k.dev_data_type.data_type].fragmented, a.v->d[2]);
|
||||
}
|
||||
rcu_read_unlock();
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
unsigned idx;
|
||||
|
|
|
|||
|
|
@ -170,36 +170,28 @@ int bch2_sb_disk_groups_to_cpu(struct bch_fs *c)
|
|||
const struct bch_devs_mask *bch2_target_to_mask(struct bch_fs *c, unsigned target)
|
||||
{
|
||||
struct target t = target_decode(target);
|
||||
struct bch_devs_mask *devs;
|
||||
|
||||
rcu_read_lock();
|
||||
guard(rcu)();
|
||||
|
||||
switch (t.type) {
|
||||
case TARGET_NULL:
|
||||
devs = NULL;
|
||||
break;
|
||||
return NULL;
|
||||
case TARGET_DEV: {
|
||||
struct bch_dev *ca = t.dev < c->sb.nr_devices
|
||||
? rcu_dereference(c->devs[t.dev])
|
||||
: NULL;
|
||||
devs = ca ? &ca->self : NULL;
|
||||
break;
|
||||
return ca ? &ca->self : NULL;
|
||||
}
|
||||
case TARGET_GROUP: {
|
||||
struct bch_disk_groups_cpu *g = rcu_dereference(c->disk_groups);
|
||||
|
||||
devs = g && t.group < g->nr && !g->entries[t.group].deleted
|
||||
return g && t.group < g->nr && !g->entries[t.group].deleted
|
||||
? &g->entries[t.group].devs
|
||||
: NULL;
|
||||
break;
|
||||
}
|
||||
default:
|
||||
BUG();
|
||||
}
|
||||
|
||||
rcu_read_unlock();
|
||||
|
||||
return devs;
|
||||
}
|
||||
|
||||
bool bch2_dev_in_target(struct bch_fs *c, unsigned dev, unsigned target)
|
||||
|
|
@ -384,7 +376,7 @@ void bch2_disk_groups_to_text(struct printbuf *out, struct bch_fs *c)
|
|||
bch2_printbuf_make_room(out, 4096);
|
||||
|
||||
out->atomic++;
|
||||
rcu_read_lock();
|
||||
guard(rcu)();
|
||||
struct bch_disk_groups_cpu *g = rcu_dereference(c->disk_groups);
|
||||
|
||||
for (unsigned i = 0; i < (g ? g->nr : 0); i++) {
|
||||
|
|
@ -405,16 +397,14 @@ next:
|
|||
prt_newline(out);
|
||||
}
|
||||
|
||||
rcu_read_unlock();
|
||||
out->atomic--;
|
||||
}
|
||||
|
||||
void bch2_disk_path_to_text(struct printbuf *out, struct bch_fs *c, unsigned v)
|
||||
{
|
||||
out->atomic++;
|
||||
rcu_read_lock();
|
||||
guard(rcu)();
|
||||
__bch2_disk_path_to_text(out, rcu_dereference(c->disk_groups), v),
|
||||
rcu_read_unlock();
|
||||
--out->atomic;
|
||||
}
|
||||
|
||||
|
|
@ -535,13 +525,11 @@ void bch2_target_to_text(struct printbuf *out, struct bch_fs *c, unsigned v)
|
|||
switch (t.type) {
|
||||
case TARGET_NULL:
|
||||
prt_printf(out, "none");
|
||||
break;
|
||||
return;
|
||||
case TARGET_DEV: {
|
||||
struct bch_dev *ca;
|
||||
|
||||
out->atomic++;
|
||||
rcu_read_lock();
|
||||
ca = t.dev < c->sb.nr_devices
|
||||
guard(rcu)();
|
||||
struct bch_dev *ca = t.dev < c->sb.nr_devices
|
||||
? rcu_dereference(c->devs[t.dev])
|
||||
: NULL;
|
||||
|
||||
|
|
@ -552,13 +540,12 @@ void bch2_target_to_text(struct printbuf *out, struct bch_fs *c, unsigned v)
|
|||
else
|
||||
prt_printf(out, "invalid device %u", t.dev);
|
||||
|
||||
rcu_read_unlock();
|
||||
out->atomic--;
|
||||
break;
|
||||
return;
|
||||
}
|
||||
case TARGET_GROUP:
|
||||
bch2_disk_path_to_text(out, c, t.group);
|
||||
break;
|
||||
return;
|
||||
default:
|
||||
BUG();
|
||||
}
|
||||
|
|
|
|||
|
|
@ -1578,26 +1578,26 @@ static struct ec_stripe_new *ec_new_stripe_alloc(struct bch_fs *c, struct ec_str
|
|||
static void ec_stripe_head_devs_update(struct bch_fs *c, struct ec_stripe_head *h)
|
||||
{
|
||||
struct bch_devs_mask devs = h->devs;
|
||||
unsigned nr_devs, nr_devs_with_durability;
|
||||
|
||||
rcu_read_lock();
|
||||
h->devs = target_rw_devs(c, BCH_DATA_user, h->disk_label
|
||||
? group_to_target(h->disk_label - 1)
|
||||
: 0);
|
||||
unsigned nr_devs = dev_mask_nr(&h->devs);
|
||||
scoped_guard(rcu) {
|
||||
h->devs = target_rw_devs(c, BCH_DATA_user, h->disk_label
|
||||
? group_to_target(h->disk_label - 1)
|
||||
: 0);
|
||||
nr_devs = dev_mask_nr(&h->devs);
|
||||
|
||||
for_each_member_device_rcu(c, ca, &h->devs)
|
||||
if (!ca->mi.durability)
|
||||
__clear_bit(ca->dev_idx, h->devs.d);
|
||||
unsigned nr_devs_with_durability = dev_mask_nr(&h->devs);
|
||||
for_each_member_device_rcu(c, ca, &h->devs)
|
||||
if (!ca->mi.durability)
|
||||
__clear_bit(ca->dev_idx, h->devs.d);
|
||||
nr_devs_with_durability = dev_mask_nr(&h->devs);
|
||||
|
||||
h->blocksize = pick_blocksize(c, &h->devs);
|
||||
h->blocksize = pick_blocksize(c, &h->devs);
|
||||
|
||||
h->nr_active_devs = 0;
|
||||
for_each_member_device_rcu(c, ca, &h->devs)
|
||||
if (ca->mi.bucket_size == h->blocksize)
|
||||
h->nr_active_devs++;
|
||||
|
||||
rcu_read_unlock();
|
||||
h->nr_active_devs = 0;
|
||||
for_each_member_device_rcu(c, ca, &h->devs)
|
||||
if (ca->mi.bucket_size == h->blocksize)
|
||||
h->nr_active_devs++;
|
||||
}
|
||||
|
||||
/*
|
||||
* If we only have redundancy + 1 devices, we're better off with just
|
||||
|
|
@ -2141,15 +2141,14 @@ int bch2_invalidate_stripe_to_dev(struct btree_trans *trans,
|
|||
|
||||
unsigned nr_good = 0;
|
||||
|
||||
rcu_read_lock();
|
||||
bkey_for_each_ptr(ptrs, ptr) {
|
||||
if (ptr->dev == dev_idx)
|
||||
ptr->dev = BCH_SB_MEMBER_INVALID;
|
||||
scoped_guard(rcu)
|
||||
bkey_for_each_ptr(ptrs, ptr) {
|
||||
if (ptr->dev == dev_idx)
|
||||
ptr->dev = BCH_SB_MEMBER_INVALID;
|
||||
|
||||
struct bch_dev *ca = bch2_dev_rcu(trans->c, ptr->dev);
|
||||
nr_good += ca && ca->mi.state != BCH_MEMBER_STATE_failed;
|
||||
}
|
||||
rcu_read_unlock();
|
||||
struct bch_dev *ca = bch2_dev_rcu(trans->c, ptr->dev);
|
||||
nr_good += ca && ca->mi.state != BCH_MEMBER_STATE_failed;
|
||||
}
|
||||
|
||||
if (nr_good < s->v.nr_blocks && !(flags & BCH_FORCE_IF_DATA_DEGRADED))
|
||||
return -BCH_ERR_remove_would_lose_data;
|
||||
|
|
|
|||
|
|
@ -65,15 +65,15 @@ void bch2_io_failures_to_text(struct printbuf *out,
|
|||
continue;
|
||||
|
||||
bch2_printbuf_make_room(out, 1024);
|
||||
rcu_read_lock();
|
||||
out->atomic++;
|
||||
struct bch_dev *ca = bch2_dev_rcu_noerror(c, f->dev);
|
||||
if (ca)
|
||||
prt_str(out, ca->name);
|
||||
else
|
||||
prt_printf(out, "(invalid device %u)", f->dev);
|
||||
scoped_guard(rcu) {
|
||||
struct bch_dev *ca = bch2_dev_rcu_noerror(c, f->dev);
|
||||
if (ca)
|
||||
prt_str(out, ca->name);
|
||||
else
|
||||
prt_printf(out, "(invalid device %u)", f->dev);
|
||||
}
|
||||
--out->atomic;
|
||||
rcu_read_unlock();
|
||||
|
||||
prt_char(out, ' ');
|
||||
|
||||
|
|
@ -407,6 +407,8 @@ bool bch2_extent_merge(struct bch_fs *c, struct bkey_s l, struct bkey_s_c r)
|
|||
lp.crc = bch2_extent_crc_unpack(l.k, NULL);
|
||||
rp.crc = bch2_extent_crc_unpack(r.k, NULL);
|
||||
|
||||
guard(rcu)();
|
||||
|
||||
while (__bkey_ptr_next_decode(l.k, l_ptrs.end, lp, en_l) &&
|
||||
__bkey_ptr_next_decode(r.k, r_ptrs.end, rp, en_r)) {
|
||||
if (lp.ptr.offset + lp.crc.offset + lp.crc.live_size !=
|
||||
|
|
@ -418,10 +420,8 @@ bool bch2_extent_merge(struct bch_fs *c, struct bkey_s l, struct bkey_s_c r)
|
|||
return false;
|
||||
|
||||
/* Extents may not straddle buckets: */
|
||||
rcu_read_lock();
|
||||
struct bch_dev *ca = bch2_dev_rcu(c, lp.ptr.dev);
|
||||
bool same_bucket = ca && PTR_BUCKET_NR(ca, &lp.ptr) == PTR_BUCKET_NR(ca, &rp.ptr);
|
||||
rcu_read_unlock();
|
||||
|
||||
if (!same_bucket)
|
||||
return false;
|
||||
|
|
@ -838,11 +838,9 @@ unsigned bch2_bkey_durability(struct bch_fs *c, struct bkey_s_c k)
|
|||
struct extent_ptr_decoded p;
|
||||
unsigned durability = 0;
|
||||
|
||||
rcu_read_lock();
|
||||
guard(rcu)();
|
||||
bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
|
||||
durability += bch2_extent_ptr_durability(c, &p);
|
||||
rcu_read_unlock();
|
||||
|
||||
return durability;
|
||||
}
|
||||
|
||||
|
|
@ -853,12 +851,10 @@ static unsigned bch2_bkey_durability_safe(struct bch_fs *c, struct bkey_s_c k)
|
|||
struct extent_ptr_decoded p;
|
||||
unsigned durability = 0;
|
||||
|
||||
rcu_read_lock();
|
||||
guard(rcu)();
|
||||
bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
|
||||
if (p.ptr.dev < c->sb.nr_devices && c->devs[p.ptr.dev])
|
||||
durability += bch2_extent_ptr_durability(c, &p);
|
||||
rcu_read_unlock();
|
||||
|
||||
return durability;
|
||||
}
|
||||
|
||||
|
|
@ -1015,20 +1011,16 @@ bool bch2_bkey_has_target(struct bch_fs *c, struct bkey_s_c k, unsigned target)
|
|||
{
|
||||
struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
|
||||
struct bch_dev *ca;
|
||||
bool ret = false;
|
||||
|
||||
rcu_read_lock();
|
||||
guard(rcu)();
|
||||
bkey_for_each_ptr(ptrs, ptr)
|
||||
if (bch2_dev_in_target(c, ptr->dev, target) &&
|
||||
(ca = bch2_dev_rcu(c, ptr->dev)) &&
|
||||
(!ptr->cached ||
|
||||
!dev_ptr_stale_rcu(ca, ptr))) {
|
||||
ret = true;
|
||||
break;
|
||||
}
|
||||
rcu_read_unlock();
|
||||
!dev_ptr_stale_rcu(ca, ptr)))
|
||||
return true;
|
||||
|
||||
return ret;
|
||||
return false;
|
||||
}
|
||||
|
||||
bool bch2_bkey_matches_ptr(struct bch_fs *c, struct bkey_s_c k,
|
||||
|
|
@ -1142,7 +1134,7 @@ void bch2_extent_ptr_set_cached(struct bch_fs *c,
|
|||
bool have_cached_ptr;
|
||||
unsigned drop_dev = ptr->dev;
|
||||
|
||||
rcu_read_lock();
|
||||
guard(rcu)();
|
||||
restart_drop_ptrs:
|
||||
ptrs = bch2_bkey_ptrs(k);
|
||||
have_cached_ptr = false;
|
||||
|
|
@ -1175,10 +1167,8 @@ restart_drop_ptrs:
|
|||
goto drop;
|
||||
|
||||
ptr->cached = true;
|
||||
rcu_read_unlock();
|
||||
return;
|
||||
drop:
|
||||
rcu_read_unlock();
|
||||
bch2_bkey_drop_ptr_noerror(k, ptr);
|
||||
}
|
||||
|
||||
|
|
@ -1194,12 +1184,11 @@ bool bch2_extent_normalize(struct bch_fs *c, struct bkey_s k)
|
|||
{
|
||||
struct bch_dev *ca;
|
||||
|
||||
rcu_read_lock();
|
||||
guard(rcu)();
|
||||
bch2_bkey_drop_ptrs(k, ptr,
|
||||
ptr->cached &&
|
||||
(!(ca = bch2_dev_rcu(c, ptr->dev)) ||
|
||||
dev_ptr_stale_rcu(ca, ptr) > 0));
|
||||
rcu_read_unlock();
|
||||
|
||||
return bkey_deleted(k.k);
|
||||
}
|
||||
|
|
@ -1217,7 +1206,7 @@ bool bch2_extent_normalize_by_opts(struct bch_fs *c,
|
|||
struct bkey_ptrs ptrs;
|
||||
bool have_cached_ptr;
|
||||
|
||||
rcu_read_lock();
|
||||
guard(rcu)();
|
||||
restart_drop_ptrs:
|
||||
ptrs = bch2_bkey_ptrs(k);
|
||||
have_cached_ptr = false;
|
||||
|
|
@ -1230,7 +1219,6 @@ restart_drop_ptrs:
|
|||
}
|
||||
have_cached_ptr = true;
|
||||
}
|
||||
rcu_read_unlock();
|
||||
|
||||
return bkey_deleted(k.k);
|
||||
}
|
||||
|
|
@ -1238,7 +1226,7 @@ restart_drop_ptrs:
|
|||
void bch2_extent_ptr_to_text(struct printbuf *out, struct bch_fs *c, const struct bch_extent_ptr *ptr)
|
||||
{
|
||||
out->atomic++;
|
||||
rcu_read_lock();
|
||||
guard(rcu)();
|
||||
struct bch_dev *ca = bch2_dev_rcu_noerror(c, ptr->dev);
|
||||
if (!ca) {
|
||||
prt_printf(out, "ptr: %u:%llu gen %u%s", ptr->dev,
|
||||
|
|
@ -1262,7 +1250,6 @@ void bch2_extent_ptr_to_text(struct printbuf *out, struct bch_fs *c, const struc
|
|||
else if (stale)
|
||||
prt_printf(out, " invalid");
|
||||
}
|
||||
rcu_read_unlock();
|
||||
--out->atomic;
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -71,12 +71,12 @@ void bch2_inode_flush_nocow_writes_async(struct bch_fs *c,
|
|||
memset(&inode->ei_devs_need_flush, 0, sizeof(inode->ei_devs_need_flush));
|
||||
|
||||
for_each_set_bit(dev, devs.d, BCH_SB_MEMBERS_MAX) {
|
||||
rcu_read_lock();
|
||||
ca = rcu_dereference(c->devs[dev]);
|
||||
if (ca && !enumerated_ref_tryget(&ca->io_ref[WRITE],
|
||||
BCH_DEV_WRITE_REF_nocow_flush))
|
||||
ca = NULL;
|
||||
rcu_read_unlock();
|
||||
scoped_guard(rcu) {
|
||||
ca = rcu_dereference(c->devs[dev]);
|
||||
if (ca && !enumerated_ref_tryget(&ca->io_ref[WRITE],
|
||||
BCH_DEV_WRITE_REF_nocow_flush))
|
||||
ca = NULL;
|
||||
}
|
||||
|
||||
if (!ca)
|
||||
continue;
|
||||
|
|
|
|||
|
|
@ -2327,14 +2327,13 @@ static int bch2_show_devname(struct seq_file *seq, struct dentry *root)
|
|||
struct bch_fs *c = root->d_sb->s_fs_info;
|
||||
bool first = true;
|
||||
|
||||
rcu_read_lock();
|
||||
guard(rcu)();
|
||||
for_each_online_member_rcu(c, ca) {
|
||||
if (!first)
|
||||
seq_putc(seq, ':');
|
||||
first = false;
|
||||
seq_puts(seq, ca->disk_sb.sb_name);
|
||||
}
|
||||
rcu_read_unlock();
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
|
@ -2531,16 +2530,16 @@ got_sb:
|
|||
|
||||
sb->s_bdi->ra_pages = VM_READAHEAD_PAGES;
|
||||
|
||||
rcu_read_lock();
|
||||
for_each_online_member_rcu(c, ca) {
|
||||
struct block_device *bdev = ca->disk_sb.bdev;
|
||||
scoped_guard(rcu) {
|
||||
for_each_online_member_rcu(c, ca) {
|
||||
struct block_device *bdev = ca->disk_sb.bdev;
|
||||
|
||||
/* XXX: create an anonymous device for multi device filesystems */
|
||||
sb->s_bdev = bdev;
|
||||
sb->s_dev = bdev->bd_dev;
|
||||
break;
|
||||
/* XXX: create an anonymous device for multi device filesystems */
|
||||
sb->s_bdev = bdev;
|
||||
sb->s_dev = bdev->bd_dev;
|
||||
break;
|
||||
}
|
||||
}
|
||||
rcu_read_unlock();
|
||||
|
||||
c->dev = sb->s_dev;
|
||||
|
||||
|
|
|
|||
|
|
@ -56,7 +56,7 @@ static bool bch2_target_congested(struct bch_fs *c, u16 target)
|
|||
if (!target)
|
||||
return false;
|
||||
|
||||
rcu_read_lock();
|
||||
guard(rcu)();
|
||||
devs = bch2_target_to_mask(c, target) ?:
|
||||
&c->rw_devs[BCH_DATA_user];
|
||||
|
||||
|
|
@ -73,7 +73,6 @@ static bool bch2_target_congested(struct bch_fs *c, u16 target)
|
|||
total += max(congested, 0LL);
|
||||
nr++;
|
||||
}
|
||||
rcu_read_unlock();
|
||||
|
||||
return get_random_u32_below(nr * CONGESTED_MAX) < total;
|
||||
}
|
||||
|
|
|
|||
|
|
@ -1208,16 +1208,13 @@ static bool bch2_extent_is_writeable(struct bch_write_op *op,
|
|||
|
||||
e = bkey_s_c_to_extent(k);
|
||||
|
||||
rcu_read_lock();
|
||||
guard(rcu)();
|
||||
extent_for_each_ptr_decode(e, p, entry) {
|
||||
if (crc_is_encoded(p.crc) || p.has_ec) {
|
||||
rcu_read_unlock();
|
||||
if (crc_is_encoded(p.crc) || p.has_ec)
|
||||
return false;
|
||||
}
|
||||
|
||||
replicas += bch2_extent_ptr_durability(c, &p);
|
||||
}
|
||||
rcu_read_unlock();
|
||||
|
||||
return replicas >= op->opts.data_replicas;
|
||||
}
|
||||
|
|
|
|||
|
|
@ -708,10 +708,9 @@ static unsigned max_dev_latency(struct bch_fs *c)
|
|||
{
|
||||
u64 nsecs = 0;
|
||||
|
||||
rcu_read_lock();
|
||||
guard(rcu)();
|
||||
for_each_rw_member_rcu(c, ca)
|
||||
nsecs = max(nsecs, ca->io_latency[WRITE].stats.max_duration);
|
||||
rcu_read_unlock();
|
||||
|
||||
return nsecs_to_jiffies(nsecs);
|
||||
}
|
||||
|
|
@ -1732,7 +1731,7 @@ void __bch2_journal_debug_to_text(struct printbuf *out, struct journal *j)
|
|||
printbuf_tabstop_push(out, 28);
|
||||
out->atomic++;
|
||||
|
||||
rcu_read_lock();
|
||||
guard(rcu)();
|
||||
s = READ_ONCE(j->reservations);
|
||||
|
||||
prt_printf(out, "flags:\t");
|
||||
|
|
@ -1823,8 +1822,6 @@ void __bch2_journal_debug_to_text(struct printbuf *out, struct journal *j)
|
|||
|
||||
prt_printf(out, "replicas want %u need %u\n", c->opts.metadata_replicas, c->opts.metadata_replicas_required);
|
||||
|
||||
rcu_read_unlock();
|
||||
|
||||
--out->atomic;
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -1521,7 +1521,7 @@ static void journal_advance_devs_to_next_bucket(struct journal *j,
|
|||
{
|
||||
struct bch_fs *c = container_of(j, struct bch_fs, journal);
|
||||
|
||||
rcu_read_lock();
|
||||
guard(rcu)();
|
||||
darray_for_each(*devs, i) {
|
||||
struct bch_dev *ca = rcu_dereference(c->devs[*i]);
|
||||
if (!ca)
|
||||
|
|
@ -1543,7 +1543,6 @@ static void journal_advance_devs_to_next_bucket(struct journal *j,
|
|||
ja->bucket_seq[ja->cur_idx] = le64_to_cpu(seq);
|
||||
}
|
||||
}
|
||||
rcu_read_unlock();
|
||||
}
|
||||
|
||||
static void __journal_write_alloc(struct journal *j,
|
||||
|
|
|
|||
|
|
@ -148,7 +148,6 @@ static struct journal_space __journal_space_available(struct journal *j, unsigne
|
|||
|
||||
BUG_ON(nr_devs_want > ARRAY_SIZE(dev_space));
|
||||
|
||||
rcu_read_lock();
|
||||
for_each_member_device_rcu(c, ca, &c->rw_devs[BCH_DATA_journal]) {
|
||||
if (!ca->journal.nr ||
|
||||
!ca->mi.durability)
|
||||
|
|
@ -166,7 +165,6 @@ static struct journal_space __journal_space_available(struct journal *j, unsigne
|
|||
|
||||
array_insert_item(dev_space, nr_devs, pos, space);
|
||||
}
|
||||
rcu_read_unlock();
|
||||
|
||||
if (nr_devs < nr_devs_want)
|
||||
return (struct journal_space) { 0, 0 };
|
||||
|
|
@ -191,8 +189,8 @@ void bch2_journal_space_available(struct journal *j)
|
|||
int ret = 0;
|
||||
|
||||
lockdep_assert_held(&j->lock);
|
||||
guard(rcu)();
|
||||
|
||||
rcu_read_lock();
|
||||
for_each_member_device_rcu(c, ca, &c->rw_devs[BCH_DATA_journal]) {
|
||||
struct journal_device *ja = &ca->journal;
|
||||
|
||||
|
|
@ -212,7 +210,6 @@ void bch2_journal_space_available(struct journal *j)
|
|||
max_entry_size = min_t(unsigned, max_entry_size, ca->mi.bucket_size);
|
||||
nr_online++;
|
||||
}
|
||||
rcu_read_unlock();
|
||||
|
||||
j->can_discard = can_discard;
|
||||
|
||||
|
|
@ -223,10 +220,8 @@ void bch2_journal_space_available(struct journal *j)
|
|||
prt_printf(&buf, "insufficient writeable journal devices available: have %u, need %u\n"
|
||||
"rw journal devs:", nr_online, metadata_replicas_required(c));
|
||||
|
||||
rcu_read_lock();
|
||||
for_each_member_device_rcu(c, ca, &c->rw_devs[BCH_DATA_journal])
|
||||
prt_printf(&buf, " %s", ca->name);
|
||||
rcu_read_unlock();
|
||||
|
||||
bch_err(c, "%s", buf.buf);
|
||||
printbuf_exit(&buf);
|
||||
|
|
@@ -626,9 +621,9 @@ static u64 journal_seq_to_flush(struct journal *j)
     struct bch_fs *c = container_of(j, struct bch_fs, journal);
     u64 seq_to_flush = 0;
 
-    spin_lock(&j->lock);
+    guard(spinlock)(&j->lock);
+    guard(rcu)();
 
-    rcu_read_lock();
     for_each_rw_member_rcu(c, ca) {
         struct journal_device *ja = &ca->journal;
         unsigned nr_buckets, bucket_to_flush;
@@ -643,15 +638,11 @@ static u64 journal_seq_to_flush(struct journal *j)
         seq_to_flush = max(seq_to_flush,
                            ja->bucket_seq[bucket_to_flush]);
     }
-    rcu_read_unlock();
 
     /* Also flush if the pin fifo is more than half full */
-    seq_to_flush = max_t(s64, seq_to_flush,
-                         (s64) journal_cur_seq(j) -
-                         (j->pin.size >> 1));
-    spin_unlock(&j->lock);
-
-    return seq_to_flush;
+    return max_t(s64, seq_to_flush,
+                 (s64) journal_cur_seq(j) -
+                 (j->pin.size >> 1));
 }
 
 /**
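Guards stack naturally: they are released in reverse declaration order when
the function returns, which is what lets the spin_unlock()/rcu_read_unlock()
calls and the temporary seq_to_flush return path disappear. A rough sketch of
the resulting structure, mirroring the hunks above:

    static u64 seq_to_flush_sketch(struct journal *j)
    {
        u64 seq_to_flush = 0;

        guard(spinlock)(&j->lock);    /* spin_lock() now, spin_unlock() on return */
        guard(rcu)();                 /* rcu_read_lock() now, dropped before the spinlock */

        /* ... walk the journal devices and raise seq_to_flush ... */

        /* Also flush if the pin fifo is more than half full */
        return max_t(s64, seq_to_flush,
                     (s64) journal_cur_seq(j) - (j->pin.size >> 1));
    }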
|
|
|||
|
|
@ -145,13 +145,11 @@ static u64 bkey_lru_type_idx(struct bch_fs *c,
|
|||
case BCH_LRU_fragmentation: {
|
||||
a = bch2_alloc_to_v4(k, &a_convert);
|
||||
|
||||
rcu_read_lock();
|
||||
guard(rcu)();
|
||||
struct bch_dev *ca = bch2_dev_rcu_noerror(c, k.k->p.inode);
|
||||
u64 idx = ca
|
||||
return ca
|
||||
? alloc_lru_idx_fragmentation(*a, ca)
|
||||
: 0;
|
||||
rcu_read_unlock();
|
||||
return idx;
|
||||
}
|
||||
case BCH_LRU_stripes:
|
||||
return k.k->type == KEY_TYPE_stripe
|
||||
|
|
|
|||
|
|
@ -1176,7 +1176,7 @@ static bool rereplicate_pred(struct bch_fs *c, void *arg,
|
|||
? c->opts.metadata_replicas
|
||||
: io_opts->data_replicas;
|
||||
|
||||
rcu_read_lock();
|
||||
guard(rcu)();
|
||||
struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
|
||||
unsigned i = 0;
|
||||
bkey_for_each_ptr(ptrs, ptr) {
|
||||
|
|
@ -1186,7 +1186,6 @@ static bool rereplicate_pred(struct bch_fs *c, void *arg,
|
|||
data_opts->kill_ptrs |= BIT(i);
|
||||
i++;
|
||||
}
|
||||
rcu_read_unlock();
|
||||
|
||||
if (!data_opts->kill_ptrs &&
|
||||
(!nr_good || nr_good >= replicas))
|
||||
|
|
@ -1294,7 +1293,7 @@ static bool drop_extra_replicas_pred(struct bch_fs *c, void *arg,
|
|||
struct extent_ptr_decoded p;
|
||||
unsigned i = 0;
|
||||
|
||||
rcu_read_lock();
|
||||
guard(rcu)();
|
||||
bkey_for_each_ptr_decode(k.k, bch2_bkey_ptrs_c(k), p, entry) {
|
||||
unsigned d = bch2_extent_ptr_durability(c, &p);
|
||||
|
||||
|
|
@ -1305,7 +1304,6 @@ static bool drop_extra_replicas_pred(struct bch_fs *c, void *arg,
|
|||
|
||||
i++;
|
||||
}
|
||||
rcu_read_unlock();
|
||||
|
||||
return data_opts->kill_ptrs != 0;
|
||||
}
|
||||
|
|
|
|||
|
|
@ -293,11 +293,9 @@ u64 bch2_copygc_wait_amount(struct bch_fs *c)
|
|||
{
|
||||
u64 wait = U64_MAX;
|
||||
|
||||
rcu_read_lock();
|
||||
guard(rcu)();
|
||||
for_each_rw_member_rcu(c, ca)
|
||||
wait = min(wait, bch2_copygc_dev_wait_amount(ca));
|
||||
rcu_read_unlock();
|
||||
|
||||
return wait;
|
||||
}
|
||||
|
||||
|
|
@ -321,21 +319,21 @@ void bch2_copygc_wait_to_text(struct printbuf *out, struct bch_fs *c)
|
|||
|
||||
bch2_printbuf_make_room(out, 4096);
|
||||
|
||||
rcu_read_lock();
|
||||
struct task_struct *t;
|
||||
out->atomic++;
|
||||
scoped_guard(rcu) {
|
||||
prt_printf(out, "Currently calculated wait:\n");
|
||||
for_each_rw_member_rcu(c, ca) {
|
||||
prt_printf(out, " %s:\t", ca->name);
|
||||
prt_human_readable_u64(out, bch2_copygc_dev_wait_amount(ca));
|
||||
prt_newline(out);
|
||||
}
|
||||
|
||||
prt_printf(out, "Currently calculated wait:\n");
|
||||
for_each_rw_member_rcu(c, ca) {
|
||||
prt_printf(out, " %s:\t", ca->name);
|
||||
prt_human_readable_u64(out, bch2_copygc_dev_wait_amount(ca));
|
||||
prt_newline(out);
|
||||
t = rcu_dereference(c->copygc_thread);
|
||||
if (t)
|
||||
get_task_struct(t);
|
||||
}
|
||||
|
||||
struct task_struct *t = rcu_dereference(c->copygc_thread);
|
||||
if (t)
|
||||
get_task_struct(t);
|
||||
--out->atomic;
|
||||
rcu_read_unlock();
|
||||
|
||||
if (t) {
|
||||
bch2_prt_task_backtrace(out, t, 0, GFP_KERNEL);
|
||||
|
|
|
|||
|
|
@ -7,11 +7,10 @@ void bch2_copygc_wait_to_text(struct printbuf *, struct bch_fs *);
|
|||
|
||||
static inline void bch2_copygc_wakeup(struct bch_fs *c)
|
||||
{
|
||||
rcu_read_lock();
|
||||
guard(rcu)();
|
||||
struct task_struct *p = rcu_dereference(c->copygc_thread);
|
||||
if (p)
|
||||
wake_up_process(p);
|
||||
rcu_read_unlock();
|
||||
}
|
||||
|
||||
void bch2_copygc_stop(struct bch_fs *);
|
||||
|
|
|
|||
|
|
@ -80,13 +80,12 @@ static inline unsigned bch2_bkey_ptrs_need_move(struct bch_fs *c,
|
|||
unsigned ptr_bit = 1;
|
||||
unsigned rewrite_ptrs = 0;
|
||||
|
||||
rcu_read_lock();
|
||||
guard(rcu)();
|
||||
bkey_for_each_ptr(ptrs, ptr) {
|
||||
if (!ptr->cached && !bch2_dev_in_target(c, ptr->dev, opts->background_target))
|
||||
rewrite_ptrs |= ptr_bit;
|
||||
ptr_bit <<= 1;
|
||||
}
|
||||
rcu_read_unlock();
|
||||
|
||||
return rewrite_ptrs;
|
||||
}
|
||||
|
|
@ -135,12 +134,11 @@ u64 bch2_bkey_sectors_need_rebalance(struct bch_fs *c, struct bkey_s_c k)
|
|||
}
|
||||
incompressible:
|
||||
if (opts->background_target) {
|
||||
rcu_read_lock();
|
||||
guard(rcu)();
|
||||
bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
|
||||
if (!p.ptr.cached &&
|
||||
!bch2_dev_in_target(c, p.ptr.dev, opts->background_target))
|
||||
sectors += p.crc.compressed_size;
|
||||
rcu_read_unlock();
|
||||
}
|
||||
|
||||
return sectors;
|
||||
|
|
@ -679,11 +677,12 @@ void bch2_rebalance_status_to_text(struct printbuf *out, struct bch_fs *c)
|
|||
}
|
||||
prt_newline(out);
|
||||
|
||||
rcu_read_lock();
|
||||
struct task_struct *t = rcu_dereference(c->rebalance.thread);
|
||||
if (t)
|
||||
get_task_struct(t);
|
||||
rcu_read_unlock();
|
||||
struct task_struct *t;
|
||||
scoped_guard(rcu) {
|
||||
t = rcu_dereference(c->rebalance.thread);
|
||||
if (t)
|
||||
get_task_struct(t);
|
||||
}
|
||||
|
||||
if (t) {
|
||||
bch2_prt_task_backtrace(out, t, 0, GFP_KERNEL);
|
||||
|
|
|
|||
|
|
@ -819,20 +819,19 @@ bool bch2_have_enough_devs(struct bch_fs *c, struct bch_devs_mask devs,
|
|||
if (e->data_type == BCH_DATA_cached)
|
||||
continue;
|
||||
|
||||
rcu_read_lock();
|
||||
for (unsigned i = 0; i < e->nr_devs; i++) {
|
||||
if (e->devs[i] == BCH_SB_MEMBER_INVALID) {
|
||||
nr_failed++;
|
||||
continue;
|
||||
scoped_guard(rcu)
|
||||
for (unsigned i = 0; i < e->nr_devs; i++) {
|
||||
if (e->devs[i] == BCH_SB_MEMBER_INVALID) {
|
||||
nr_failed++;
|
||||
continue;
|
||||
}
|
||||
|
||||
nr_online += test_bit(e->devs[i], devs.d);
|
||||
|
||||
struct bch_dev *ca = bch2_dev_rcu_noerror(c, e->devs[i]);
|
||||
nr_failed += !ca || ca->mi.state == BCH_MEMBER_STATE_failed;
|
||||
}
|
||||
|
||||
nr_online += test_bit(e->devs[i], devs.d);
|
||||
|
||||
struct bch_dev *ca = bch2_dev_rcu_noerror(c, e->devs[i]);
|
||||
nr_failed += !ca || ca->mi.state == BCH_MEMBER_STATE_failed;
|
||||
}
|
||||
rcu_read_unlock();
|
||||
|
||||
if (nr_online + nr_failed == e->nr_devs)
|
||||
continue;
|
||||
|
||||
|
|
|
|||
|
|
@ -378,14 +378,13 @@ void bch2_sb_members_from_cpu(struct bch_fs *c)
|
|||
{
|
||||
struct bch_sb_field_members_v2 *mi = bch2_sb_field_get(c->disk_sb.sb, members_v2);
|
||||
|
||||
rcu_read_lock();
|
||||
guard(rcu)();
|
||||
for_each_member_device_rcu(c, ca, NULL) {
|
||||
struct bch_member *m = __bch2_members_v2_get_mut(mi, ca->dev_idx);
|
||||
|
||||
for (unsigned e = 0; e < BCH_MEMBER_ERROR_NR; e++)
|
||||
m->errors[e] = cpu_to_le64(atomic64_read(&ca->errors[e]));
|
||||
}
|
||||
rcu_read_unlock();
|
||||
}
|
||||
|
||||
void bch2_dev_io_errors_to_text(struct printbuf *out, struct bch_dev *ca)
|
||||
|
|
@ -443,20 +442,14 @@ void bch2_dev_errors_reset(struct bch_dev *ca)
|
|||
|
||||
bool bch2_dev_btree_bitmap_marked(struct bch_fs *c, struct bkey_s_c k)
|
||||
{
|
||||
bool ret = true;
|
||||
rcu_read_lock();
|
||||
guard(rcu)();
|
||||
bkey_for_each_ptr(bch2_bkey_ptrs_c(k), ptr) {
|
||||
struct bch_dev *ca = bch2_dev_rcu(c, ptr->dev);
|
||||
if (!ca)
|
||||
continue;
|
||||
|
||||
if (!bch2_dev_btree_bitmap_marked_sectors(ca, ptr->offset, btree_sectors(c))) {
|
||||
ret = false;
|
||||
break;
|
||||
}
|
||||
if (ca &&
|
||||
!bch2_dev_btree_bitmap_marked_sectors(ca, ptr->offset, btree_sectors(c)))
|
||||
return false;
|
||||
}
|
||||
rcu_read_unlock();
|
||||
return ret;
|
||||
return true;
|
||||
}
|
||||
|
||||
static void __bch2_dev_btree_bitmap_mark(struct bch_sb_field_members_v2 *mi, unsigned dev,
|
||||
|
|
|
|||
|
|
@ -28,12 +28,9 @@ static inline struct bch_dev *bch2_dev_rcu(struct bch_fs *, unsigned);
|
|||
|
||||
static inline bool bch2_dev_idx_is_online(struct bch_fs *c, unsigned dev)
|
||||
{
|
||||
rcu_read_lock();
|
||||
guard(rcu)();
|
||||
struct bch_dev *ca = bch2_dev_rcu(c, dev);
|
||||
bool ret = ca && bch2_dev_is_online(ca);
|
||||
rcu_read_unlock();
|
||||
|
||||
return ret;
|
||||
return ca && bch2_dev_is_online(ca);
|
||||
}
|
||||
|
||||
static inline bool bch2_dev_is_healthy(struct bch_dev *ca)
|
||||
|
|
@ -142,12 +139,10 @@ static inline void bch2_dev_put(struct bch_dev *ca)
|
|||
|
||||
static inline struct bch_dev *bch2_get_next_dev(struct bch_fs *c, struct bch_dev *ca)
|
||||
{
|
||||
rcu_read_lock();
|
||||
guard(rcu)();
|
||||
bch2_dev_put(ca);
|
||||
if ((ca = __bch2_next_dev(c, ca, NULL)))
|
||||
bch2_dev_get(ca);
|
||||
rcu_read_unlock();
|
||||
|
||||
return ca;
|
||||
}
|
||||
|
||||
|
|
@ -166,7 +161,7 @@ static inline struct bch_dev *bch2_get_next_online_dev(struct bch_fs *c,
|
|||
unsigned state_mask,
|
||||
int rw, unsigned ref_idx)
|
||||
{
|
||||
rcu_read_lock();
|
||||
guard(rcu)();
|
||||
if (ca)
|
||||
enumerated_ref_put(&ca->io_ref[rw], ref_idx);
|
||||
|
||||
|
|
@ -174,7 +169,6 @@ static inline struct bch_dev *bch2_get_next_online_dev(struct bch_fs *c,
|
|||
(!((1 << ca->mi.state) & state_mask) ||
|
||||
!enumerated_ref_tryget(&ca->io_ref[rw], ref_idx)))
|
||||
;
|
||||
rcu_read_unlock();
|
||||
|
||||
return ca;
|
||||
}
|
||||
|
|
@ -239,11 +233,10 @@ static inline struct bch_dev *bch2_dev_rcu(struct bch_fs *c, unsigned dev)
|
|||
|
||||
static inline struct bch_dev *bch2_dev_tryget_noerror(struct bch_fs *c, unsigned dev)
|
||||
{
|
||||
rcu_read_lock();
|
||||
guard(rcu)();
|
||||
struct bch_dev *ca = bch2_dev_rcu_noerror(c, dev);
|
||||
if (ca)
|
||||
bch2_dev_get(ca);
|
||||
rcu_read_unlock();
|
||||
return ca;
|
||||
}
|
||||
|
||||
|
|
@ -299,19 +292,16 @@ static inline struct bch_dev *bch2_dev_get_ioref(struct bch_fs *c, unsigned dev,
|
|||
{
|
||||
might_sleep();
|
||||
|
||||
rcu_read_lock();
|
||||
guard(rcu)();
|
||||
struct bch_dev *ca = bch2_dev_rcu(c, dev);
|
||||
if (ca && !enumerated_ref_tryget(&ca->io_ref[rw], ref_idx))
|
||||
ca = NULL;
|
||||
rcu_read_unlock();
|
||||
if (!ca || !enumerated_ref_tryget(&ca->io_ref[rw], ref_idx))
|
||||
return NULL;
|
||||
|
||||
if (ca &&
|
||||
(ca->mi.state == BCH_MEMBER_STATE_rw ||
|
||||
(ca->mi.state == BCH_MEMBER_STATE_ro && rw == READ)))
|
||||
if (ca->mi.state == BCH_MEMBER_STATE_rw ||
|
||||
(ca->mi.state == BCH_MEMBER_STATE_ro && rw == READ))
|
||||
return ca;
|
||||
|
||||
if (ca)
|
||||
enumerated_ref_put(&ca->io_ref[rw], ref_idx);
|
||||
enumerated_ref_put(&ca->io_ref[rw], ref_idx);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@@ -339,12 +339,9 @@ static inline bool six_owner_running(struct six_lock *lock)
      * acquiring the lock and setting the owner field. If we're an RT task
      * that will live-lock because we won't let the owner complete.
      */
-    rcu_read_lock();
+    guard(rcu)();
     struct task_struct *owner = READ_ONCE(lock->owner);
-    bool ret = owner ? owner_on_cpu(owner) : !rt_or_dl_task(current);
-    rcu_read_unlock();
-
-    return ret;
+    return owner ? owner_on_cpu(owner) : !rt_or_dl_task(current);
 }
 
 static inline bool six_optimistic_spin(struct six_lock *lock,
|
|
|||
|
|
@ -105,11 +105,8 @@ static bool __bch2_snapshot_is_ancestor_early(struct snapshot_table *t, u32 id,
|
|||
|
||||
static bool bch2_snapshot_is_ancestor_early(struct bch_fs *c, u32 id, u32 ancestor)
|
||||
{
|
||||
rcu_read_lock();
|
||||
bool ret = __bch2_snapshot_is_ancestor_early(rcu_dereference(c->snapshots), id, ancestor);
|
||||
rcu_read_unlock();
|
||||
|
||||
return ret;
|
||||
guard(rcu)();
|
||||
return __bch2_snapshot_is_ancestor_early(rcu_dereference(c->snapshots), id, ancestor);
|
||||
}
|
||||
|
||||
static inline u32 get_ancestor_below(struct snapshot_table *t, u32 id, u32 ancestor)
|
||||
|
|
@ -140,13 +137,11 @@ bool __bch2_snapshot_is_ancestor(struct bch_fs *c, u32 id, u32 ancestor)
|
|||
{
|
||||
bool ret;
|
||||
|
||||
rcu_read_lock();
|
||||
guard(rcu)();
|
||||
struct snapshot_table *t = rcu_dereference(c->snapshots);
|
||||
|
||||
if (unlikely(c->recovery.pass_done < BCH_RECOVERY_PASS_check_snapshots)) {
|
||||
ret = __bch2_snapshot_is_ancestor_early(t, id, ancestor);
|
||||
goto out;
|
||||
}
|
||||
if (unlikely(c->recovery.pass_done < BCH_RECOVERY_PASS_check_snapshots))
|
||||
return __bch2_snapshot_is_ancestor_early(t, id, ancestor);
|
||||
|
||||
if (likely(ancestor >= IS_ANCESTOR_BITMAP))
|
||||
while (id && id < ancestor - IS_ANCESTOR_BITMAP)
|
||||
|
|
@ -157,9 +152,6 @@ bool __bch2_snapshot_is_ancestor(struct bch_fs *c, u32 id, u32 ancestor)
|
|||
: id == ancestor;
|
||||
|
||||
EBUG_ON(ret != __bch2_snapshot_is_ancestor_early(t, id, ancestor));
|
||||
out:
|
||||
rcu_read_unlock();
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
|
@ -412,10 +404,10 @@ static u32 bch2_snapshot_tree_next(struct bch_fs *c, u32 id)
u32 bch2_snapshot_oldest_subvol(struct bch_fs *c, u32 snapshot_root,
snapshot_id_list *skip)
{
guard(rcu)();
u32 id, subvol = 0, s;
retry:
id = snapshot_root;
rcu_read_lock();
while (id && bch2_snapshot_exists(c, id)) {
if (!(skip && snapshot_list_has_id(skip, id))) {
s = snapshot_t(c, id)->subvol;

@ -427,7 +419,6 @@ retry:
if (id == snapshot_root)
break;
}
rcu_read_unlock();

if (!subvol && skip) {
skip = NULL;

@ -617,18 +608,14 @@ static int snapshot_tree_ptr_good(struct btree_trans *trans,

u32 bch2_snapshot_skiplist_get(struct bch_fs *c, u32 id)
{
const struct snapshot_t *s;

if (!id)
return 0;

rcu_read_lock();
s = snapshot_t(c, id);
if (s->parent)
id = bch2_snapshot_nth_parent(c, id, get_random_u32_below(s->depth));
rcu_read_unlock();

return id;
guard(rcu)();
const struct snapshot_t *s = snapshot_t(c, id);
return s->parent
? bch2_snapshot_nth_parent(c, id, get_random_u32_below(s->depth))
: id;
}

static int snapshot_skiplist_good(struct btree_trans *trans, u32 id, struct bch_snapshot s)

@ -1458,11 +1445,9 @@ static unsigned live_child(struct bch_fs *c, u32 id)
{
struct snapshot_delete *d = &c->snapshot_delete;

rcu_read_lock();
u32 ret = __live_child(rcu_dereference(c->snapshots), id,
&d->delete_leaves, &d->delete_interior);
rcu_read_unlock();
return ret;
guard(rcu)();
return __live_child(rcu_dereference(c->snapshots), id,
&d->delete_leaves, &d->delete_interior);
}

static bool snapshot_id_dying(struct snapshot_delete *d, unsigned id)

@ -1719,7 +1704,7 @@ static int check_should_delete_snapshot(struct btree_trans *trans, struct bkey_s
static inline u32 bch2_snapshot_nth_parent_skip(struct bch_fs *c, u32 id, u32 n,
interior_delete_list *skip)
{
rcu_read_lock();
guard(rcu)();
while (interior_delete_has_id(skip, id))
id = __bch2_snapshot_parent(c, id);

@ -1728,7 +1713,6 @@ static inline u32 bch2_snapshot_nth_parent_skip(struct bch_fs *c, u32 id, u32 n,
id = __bch2_snapshot_parent(c, id);
} while (interior_delete_has_id(skip, id));
}
rcu_read_unlock();

return id;
}

@ -46,12 +46,9 @@ static inline const struct snapshot_t *snapshot_t(struct bch_fs *c, u32 id)

static inline u32 bch2_snapshot_tree(struct bch_fs *c, u32 id)
{
rcu_read_lock();
guard(rcu)();
const struct snapshot_t *s = snapshot_t(c, id);
id = s ? s->tree : 0;
rcu_read_unlock();

return id;
return s ? s->tree : 0;
}

static inline u32 __bch2_snapshot_parent_early(struct bch_fs *c, u32 id)

@ -62,11 +59,8 @@ static inline u32 __bch2_snapshot_parent_early(struct bch_fs *c, u32 id)

static inline u32 bch2_snapshot_parent_early(struct bch_fs *c, u32 id)
{
rcu_read_lock();
id = __bch2_snapshot_parent_early(c, id);
rcu_read_unlock();

return id;
guard(rcu)();
return __bch2_snapshot_parent_early(c, id);
}

static inline u32 __bch2_snapshot_parent(struct bch_fs *c, u32 id)

@ -88,20 +82,15 @@ static inline u32 __bch2_snapshot_parent(struct bch_fs *c, u32 id)

static inline u32 bch2_snapshot_parent(struct bch_fs *c, u32 id)
{
rcu_read_lock();
id = __bch2_snapshot_parent(c, id);
rcu_read_unlock();

return id;
guard(rcu)();
return __bch2_snapshot_parent(c, id);
}

static inline u32 bch2_snapshot_nth_parent(struct bch_fs *c, u32 id, u32 n)
{
rcu_read_lock();
guard(rcu)();
while (n--)
id = __bch2_snapshot_parent(c, id);
rcu_read_unlock();

return id;
}

@ -110,13 +99,11 @@ u32 bch2_snapshot_skiplist_get(struct bch_fs *, u32);

static inline u32 bch2_snapshot_root(struct bch_fs *c, u32 id)
{
u32 parent;
guard(rcu)();

rcu_read_lock();
u32 parent;
while ((parent = __bch2_snapshot_parent(c, id)))
id = parent;
rcu_read_unlock();

return id;
}

@ -128,11 +115,8 @@ static inline enum snapshot_id_state __bch2_snapshot_id_state(struct bch_fs *c,

static inline enum snapshot_id_state bch2_snapshot_id_state(struct bch_fs *c, u32 id)
{
rcu_read_lock();
enum snapshot_id_state ret = __bch2_snapshot_id_state(c, id);
rcu_read_unlock();

return ret;
guard(rcu)();
return __bch2_snapshot_id_state(c, id);
}

static inline bool bch2_snapshot_exists(struct bch_fs *c, u32 id)

@ -142,12 +126,9 @@ static inline bool bch2_snapshot_exists(struct bch_fs *c, u32 id)

static inline int bch2_snapshot_is_internal_node(struct bch_fs *c, u32 id)
{
rcu_read_lock();
guard(rcu)();
const struct snapshot_t *s = snapshot_t(c, id);
int ret = s ? s->children[0] : -BCH_ERR_invalid_snapshot_node;
rcu_read_unlock();

return ret;
return s ? s->children[0] : -BCH_ERR_invalid_snapshot_node;
}

static inline int bch2_snapshot_is_leaf(struct bch_fs *c, u32 id)

@ -160,13 +141,8 @@ static inline int bch2_snapshot_is_leaf(struct bch_fs *c, u32 id)

static inline u32 bch2_snapshot_depth(struct bch_fs *c, u32 parent)
{
u32 depth;

rcu_read_lock();
depth = parent ? snapshot_t(c, parent)->depth + 1 : 0;
rcu_read_unlock();

return depth;
guard(rcu)();
return parent ? snapshot_t(c, parent)->depth + 1 : 0;
}

bool __bch2_snapshot_is_ancestor(struct bch_fs *, u32, u32);

@ -180,12 +156,9 @@ static inline bool bch2_snapshot_is_ancestor(struct bch_fs *c, u32 id, u32 ances

static inline bool bch2_snapshot_has_children(struct bch_fs *c, u32 id)
{
rcu_read_lock();
guard(rcu)();
const struct snapshot_t *t = snapshot_t(c, id);
bool ret = t && (t->children[0]|t->children[1]) != 0;
rcu_read_unlock();

return ret;
return t && (t->children[0]|t->children[1]) != 0;
}

static inline bool snapshot_list_has_id(snapshot_id_list *s, u32 id)

@ -141,13 +141,9 @@ static int check_subvol(struct btree_trans *trans,

if (!BCH_SUBVOLUME_SNAP(subvol.v)) {
u32 snapshot_root = bch2_snapshot_root(c, le32_to_cpu(subvol.v->snapshot));
u32 snapshot_tree;
u32 snapshot_tree = bch2_snapshot_tree(c, snapshot_root);

struct bch_snapshot_tree st;

rcu_read_lock();
snapshot_tree = snapshot_t(c, snapshot_root)->tree;
rcu_read_unlock();

ret = bch2_snapshot_tree_lookup(trans, snapshot_tree, &st);

bch2_fs_inconsistent_on(bch2_err_matches(ret, ENOENT), c,

@ -219,23 +219,17 @@ static int bch2_fs_init_rw(struct bch_fs *);

struct bch_fs *bch2_dev_to_fs(dev_t dev)
{
guard(mutex)(&bch_fs_list_lock);
guard(rcu)();

struct bch_fs *c;

mutex_lock(&bch_fs_list_lock);
rcu_read_lock();

list_for_each_entry(c, &bch_fs_list, list)
for_each_member_device_rcu(c, ca, NULL)
if (ca->disk_sb.bdev && ca->disk_sb.bdev->bd_dev == dev) {
closure_get(&c->cl);
goto found;
return c;
}
c = NULL;
found:
rcu_read_unlock();
mutex_unlock(&bch_fs_list_lock);

return c;
return NULL;
}

static struct bch_fs *__bch2_uuid_to_fs(__uuid_t uuid)

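A note on bch2_dev_to_fs() above: with two guards in the same scope (guard(mutex) declared first, then guard(rcu)), the cleanup handlers run in reverse declaration order on every exit path, so the early return c drops the RCU read lock and then the list mutex, matching the unlock order of the removed goto-based unwind. A small self-contained sketch of that ordering follows; say_a, say_b and find are made-up names used purely for illustration.

#include <stdio.h>

/* Toy model: two cleanup-attribute guards in one scope are released in
 * reverse declaration order on any return path. */

static void say_a(int *unused) { (void)unused; printf("release A (outer lock, second)\n"); }
static void say_b(int *unused) { (void)unused; printf("release B (inner lock, first)\n"); }

static int find(int key)
{
    int guard_a __attribute__((cleanup(say_a))) = 0;    /* e.g. the mutex guard */
    int guard_b __attribute__((cleanup(say_b))) = 0;    /* e.g. the RCU guard */

    if (key == 42)
        return key;    /* early return: B is released, then A */
    return -1;
}

int main(void)
{
    int v = find(42);
    printf("-> %d\n", v);
    return 0;
}
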
@ -507,13 +501,12 @@ static int __bch2_fs_read_write(struct bch_fs *c, bool early)

clear_bit(BCH_FS_clean_shutdown, &c->flags);

rcu_read_lock();
for_each_online_member_rcu(c, ca)
if (ca->mi.state == BCH_MEMBER_STATE_rw) {
bch2_dev_allocator_add(c, ca);
enumerated_ref_start(&ca->io_ref[WRITE]);
}
rcu_read_unlock();
scoped_guard(rcu)
for_each_online_member_rcu(c, ca)
if (ca->mi.state == BCH_MEMBER_STATE_rw) {
bch2_dev_allocator_add(c, ca);
enumerated_ref_start(&ca->io_ref[WRITE]);
}

bch2_recalc_capacity(c);

@ -1184,22 +1177,20 @@ int bch2_fs_start(struct bch_fs *c)
goto err;
}

rcu_read_lock();
for_each_online_member_rcu(c, ca)
bch2_members_v2_get_mut(c->disk_sb.sb, ca->dev_idx)->last_mount =
cpu_to_le64(now);
rcu_read_unlock();
scoped_guard(rcu)
for_each_online_member_rcu(c, ca)
bch2_members_v2_get_mut(c->disk_sb.sb, ca->dev_idx)->last_mount =
cpu_to_le64(now);

/*
* Don't write superblock yet: recovery might have to downgrade
*/
mutex_unlock(&c->sb_lock);

rcu_read_lock();
for_each_online_member_rcu(c, ca)
if (ca->mi.state == BCH_MEMBER_STATE_rw)
bch2_dev_allocator_add(c, ca);
rcu_read_unlock();
scoped_guard(rcu)
for_each_online_member_rcu(c, ca)
if (ca->mi.state == BCH_MEMBER_STATE_rw)
bch2_dev_allocator_add(c, ca);
bch2_recalc_capacity(c);
up_write(&c->state_lock);
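For reference: these hunks use scoped_guard(rcu) rather than guard(rcu)() because the RCU read-side section must end before the code that follows (bch2_recalc_capacity(), up_write(), and so on), instead of extending to the end of the function; the guard's lifetime is limited to the attached statement or block. A minimal, self-contained sketch of that shape in plain C follows; it is illustrative only and deliberately simpler than the kernel's cleanup.h implementation, with demo_scoped_guard(), fake_unlock() and lock_depth as made-up names.

#include <stdio.h>

/* Toy model of a scoped guard: the guard variable is declared in the
 * for-statement, so its cleanup handler runs as soon as the attached
 * block ends, not at the end of the enclosing function. */

static int lock_depth;    /* stand-in for a real lock */

static void fake_unlock(int *unused)
{
    (void)unused;
    lock_depth--;
}

#define demo_scoped_guard() \
    for (int __scope __attribute__((cleanup(fake_unlock))) = ++lock_depth, \
         __once = 1; __once; __once = 0)

int main(void)
{
    demo_scoped_guard() {
        printf("inside block: lock depth %d\n", lock_depth);    /* 1 */
    }
    printf("after block: lock depth %d\n", lock_depth);         /* 0: dropped at block end */
    return 0;
}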