Mirror of git://git.yoctoproject.org/linux-yocto.git (synced 2025-10-23 07:23:12 +02:00)
-----BEGIN PGP SIGNATURE-----

iQEzBAABCAAdFiEEq1nRK9aeMoq1VSgcnJ2qBz9kQNkFAl3hAFIACgkQnJ2qBz9k
QNkV/gf+Kwn7xHg76YXd15lZYBzhgj/ABAYEEAAVY49OOCK5+XVmmAufHesMZ2lU
Solt8PvbQ8d5786bWpaYXgrTU3JW37c6x1MDUPDLQ8goXWzx7pZWvD+Yup558rDa
H1aoqvFKLgpeVVqkUdvvv2CDbgZyOgGlkDqWeS+c5pZd1NPFZzUAoU26slvQ5h4f
t41mbavOIm5DChQ5UjwRNw+pb09GXaHrPBRJwa1XuJYJWAansBcQIsxiiqt/43Gn
AzwUGrsz4vrPBk+Kcd0SGb8vinFVQr19gBFKFeN3rPFUEUn6T0FPBqaYeiNTNE37
AqASYKlIuhcSf0Wdvx6vxwSHsFl5VA==
=NGxV
-----END PGP SIGNATURE-----

Merge tag 'for_v5.5-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/jack/linux-fs

Pull ext2, quota, reiserfs cleanups and fixes from Jan Kara:

 - Refactor the quota on/off kernel internal interfaces (mostly for
   ubifs quota support as ubifs does not want to have inodes holding
   quota information)

 - A few other small quota fixes and cleanups

 - Various small ext2 fixes and cleanups

 - Reiserfs xattr fix and one cleanup

* tag 'for_v5.5-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/jack/linux-fs: (28 commits)
  ext2: code cleanup for descriptor_loc()
  fs/quota: handle overflows of sysctl fs.quota.* and report as unsigned long
  ext2: fix improper function comment
  ext2: code cleanup for ext2_try_to_allocate()
  ext2: skip unnecessary operations in ext2_try_to_allocate()
  ext2: Simplify initialization in ext2_try_to_allocate()
  ext2: code cleanup by calling ext2_group_last_block_no()
  ext2: introduce new helper ext2_group_last_block_no()
  reiserfs: replace open-coded atomic_dec_and_mutex_lock()
  ext2: check err when partial != NULL
  quota: Handle quotas without quota inodes in dquot_get_state()
  quota: Make dquot_disable() work without quota inodes
  quota: Drop dquot_enable()
  fs: Use dquot_load_quota_inode() from filesystems
  quota: Rename vfs_load_quota_inode() to dquot_load_quota_inode()
  quota: Simplify dquot_resume()
  quota: Factor out setup of quota inode
  quota: Check that quota is not dirty before release
  quota: fix livelock in dquot_writeback_dquots
  ext2: don't set *count in the case of failure in ext2_try_to_allocate()
  ...
commit b8072d5b3c
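A note on one of the changes pulled in above: "quota: fix livelock in dquot_writeback_dquots" stops walking the per-type dirty list in place and instead splices it onto a local list head first (see the list_replace_init() hunk in fs/quota/dquot.c further down), so dquots that get re-dirtied while they are being written back land back on the original list rather than on the list being walked. What follows is only a minimal, self-contained userspace C sketch of that splice-then-walk pattern; the struct list / struct node types and the re-dirtying rule in write_back() are invented for illustration and are not kernel code.

#include <stdio.h>

/* Minimal circular doubly linked list, standing in for the kernel's list_head. */
struct list { struct list *prev, *next; };

static void list_init(struct list *h)        { h->prev = h->next = h; }
static int  list_empty(const struct list *h) { return h->next == h; }
static void list_add_tail(struct list *n, struct list *h)
{
	n->prev = h->prev; n->next = h;
	h->prev->next = n; h->prev = n;
}
static void list_del(struct list *n)
{
	n->prev->next = n->next; n->next->prev = n->prev;
	list_init(n);
}
/* Equivalent of list_replace_init(): move every entry of 'old' onto 'new'. */
static void list_replace_init(struct list *old, struct list *new)
{
	if (list_empty(old)) { list_init(new); return; }
	new->next = old->next; new->prev = old->prev;
	new->next->prev = new; new->prev->next = new;
	list_init(old);
}

struct node { struct list link; int id; };

static struct list dirty;	/* shared "dirty" list */

/* Writing a node back may immediately re-dirty it (e.g. concurrent updates). */
static void write_back(struct node *n)
{
	printf("wrote node %d\n", n->id);
	if (n->id == 0)
		list_add_tail(&n->link, &dirty);	/* re-dirtied while flushing */
}

int main(void)
{
	struct node a = { .id = 0 }, b = { .id = 1 };
	struct list work;

	list_init(&dirty);
	list_add_tail(&a.link, &dirty);
	list_add_tail(&b.link, &dirty);

	/* Splice the dirty list away first; re-dirtied nodes land on 'dirty',
	 * not on the list being walked, so this loop always terminates. */
	list_replace_init(&dirty, &work);
	while (!list_empty(&work)) {
		struct node *n = (struct node *)work.next;	/* link is first member */
		list_del(&n->link);
		write_back(n);
	}
	return 0;
}

Without the splice, an entry that re-dirties itself on every writeback could keep the loop spinning forever; with it, each pass only visits the entries that were dirty when the pass started.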
@@ -269,7 +269,7 @@ goal_in_my_reservation(struct ext2_reserve_window *rsv, ext2_grpblk_t grp_goal,
 	ext2_fsblk_t group_first_block, group_last_block;
 
 	group_first_block = ext2_group_first_block_no(sb, group);
-	group_last_block = group_first_block + EXT2_BLOCKS_PER_GROUP(sb) - 1;
+	group_last_block = ext2_group_last_block_no(sb, group);
 
 	if ((rsv->_rsv_start > group_last_block) ||
 	    (rsv->_rsv_end < group_first_block))
@@ -666,37 +666,24 @@ ext2_try_to_allocate(struct super_block *sb, int group,
 			unsigned long *count,
 			struct ext2_reserve_window *my_rsv)
 {
-	ext2_fsblk_t group_first_block;
+	ext2_fsblk_t group_first_block = ext2_group_first_block_no(sb, group);
+	ext2_fsblk_t group_last_block = ext2_group_last_block_no(sb, group);
 	ext2_grpblk_t start, end;
 	unsigned long num = 0;
 
+	start = 0;
+	end = group_last_block - group_first_block + 1;
 	/* we do allocation within the reservation window if we have a window */
 	if (my_rsv) {
-		group_first_block = ext2_group_first_block_no(sb, group);
 		if (my_rsv->_rsv_start >= group_first_block)
 			start = my_rsv->_rsv_start - group_first_block;
-		else
-			/* reservation window cross group boundary */
-			start = 0;
-		end = my_rsv->_rsv_end - group_first_block + 1;
-		if (end > EXT2_BLOCKS_PER_GROUP(sb))
-			/* reservation window crosses group boundary */
-			end = EXT2_BLOCKS_PER_GROUP(sb);
-		if ((start <= grp_goal) && (grp_goal < end))
-			start = grp_goal;
-		else
+		if (my_rsv->_rsv_end < group_last_block)
+			end = my_rsv->_rsv_end - group_first_block + 1;
+		if (grp_goal < start || grp_goal >= end)
 			grp_goal = -1;
-	} else {
-		if (grp_goal > 0)
-			start = grp_goal;
-		else
-			start = 0;
-		end = EXT2_BLOCKS_PER_GROUP(sb);
 	}
-
 	BUG_ON(start > EXT2_BLOCKS_PER_GROUP(sb));
 
-repeat:
 	if (grp_goal < 0) {
 		grp_goal = find_next_usable_block(start, bitmap_bh, end);
 		if (grp_goal < 0)
@@ -711,32 +698,23 @@ repeat:
 				;
 		}
 	}
-	start = grp_goal;
 
-	if (ext2_set_bit_atomic(sb_bgl_lock(EXT2_SB(sb), group), grp_goal,
-			bitmap_bh->b_data)) {
-		/*
-		 * The block was allocated by another thread, or it was
-		 * allocated and then freed by another thread
-		 */
-		start++;
-		grp_goal++;
-		if (start >= end)
-			goto fail_access;
-		goto repeat;
-	}
-	num++;
-	grp_goal++;
-	while (num < *count && grp_goal < end
-		&& !ext2_set_bit_atomic(sb_bgl_lock(EXT2_SB(sb), group),
+	for (; num < *count && grp_goal < end; grp_goal++) {
+		if (ext2_set_bit_atomic(sb_bgl_lock(EXT2_SB(sb), group),
 					grp_goal, bitmap_bh->b_data)) {
+			if (num == 0)
+				continue;
+			break;
+		}
 		num++;
-		grp_goal++;
 	}
+
+	if (num == 0)
+		goto fail_access;
+
 	*count = num;
 	return grp_goal - num;
 fail_access:
-	*count = num;
 	return -1;
 }
 
@@ -754,10 +732,9 @@ fail_access:
  * but we will shift to the place where start_block is,
  * then start from there, when looking for a reservable space.
  *
- * @size: the target new reservation window size
+ * @sb: the super block.
  *
- * @group_first_block: the first block we consider to start
- *			the real search from
+ * @start_block: the first block we consider to start the real search from
  *
  * @last_block:
  *	the maximum block number that our goal reservable space
@@ -908,7 +885,7 @@ static int alloc_new_reservation(struct ext2_reserve_window_node *my_rsv,
 	spinlock_t *rsv_lock = &EXT2_SB(sb)->s_rsv_window_lock;
 
 	group_first_block = ext2_group_first_block_no(sb, group);
-	group_end_block = group_first_block + (EXT2_BLOCKS_PER_GROUP(sb) - 1);
+	group_end_block = ext2_group_last_block_no(sb, group);
 
 	if (grp_goal < 0)
 		start_block = group_first_block;
@@ -1115,7 +1092,7 @@ ext2_try_to_allocate_with_rsv(struct super_block *sb, unsigned int group,
 	 * first block is the block number of the first block in this group
 	 */
 	group_first_block = ext2_group_first_block_no(sb, group);
-	group_last_block = group_first_block + (EXT2_BLOCKS_PER_GROUP(sb) - 1);
+	group_last_block = ext2_group_last_block_no(sb, group);
 
 	/*
 	 * Basically we will allocate a new block from inode's reservation
@@ -1313,6 +1290,13 @@ retry_alloc:
 	if (free_blocks > 0) {
 		grp_target_blk = ((goal - le32_to_cpu(es->s_first_data_block)) %
 				EXT2_BLOCKS_PER_GROUP(sb));
+		/*
+		 * In case we retry allocation (due to fs reservation not
+		 * working out or fs corruption), the bitmap_bh is non-null
+		 * pointer and we have to release it before calling
+		 * read_block_bitmap().
+		 */
+		brelse(bitmap_bh);
 		bitmap_bh = read_block_bitmap(sb, group_no);
 		if (!bitmap_bh)
 			goto io_error;
@@ -1404,6 +1388,7 @@ allocated:
 	 * use. So we may want to selectively mark some of the blocks
 	 * as free
 	 */
+	num = *count;
 	goto retry_alloc;
 }
 
@@ -813,6 +813,18 @@ ext2_group_first_block_no(struct super_block *sb, unsigned long group_no)
 		le32_to_cpu(EXT2_SB(sb)->s_es->s_first_data_block);
 }
 
+static inline ext2_fsblk_t
+ext2_group_last_block_no(struct super_block *sb, unsigned long group_no)
+{
+	struct ext2_sb_info *sbi = EXT2_SB(sb);
+
+	if (group_no == sbi->s_groups_count - 1)
+		return le32_to_cpu(sbi->s_es->s_blocks_count) - 1;
+	else
+		return ext2_group_first_block_no(sb, group_no) +
+			EXT2_BLOCKS_PER_GROUP(sb) - 1;
+}
+
 #define ext2_set_bit	__test_and_set_bit_le
 #define ext2_clear_bit	__test_and_clear_bit_le
 #define ext2_test_bit	test_bit_le
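As a quick illustration of what the ext2_group_last_block_no() helper added above computes, here is a small stand-alone C program that redoes the same arithmetic in userspace with made-up geometry (first data block 1, 8192 blocks per group, 3 groups, 20000 blocks total; these numbers are illustrative assumptions, not values from the patch). The point is that the last group is clamped to the end of the filesystem instead of being assumed full:

#include <stdio.h>

/* Illustrative stand-ins for the ext2 superblock fields the helper uses. */
static const unsigned long first_data_block = 1;     /* s_first_data_block */
static const unsigned long blocks_per_group = 8192;  /* EXT2_BLOCKS_PER_GROUP() */
static const unsigned long groups_count     = 3;     /* s_groups_count */
static const unsigned long blocks_count     = 20000; /* s_blocks_count */

static unsigned long group_first_block_no(unsigned long group_no)
{
	return group_no * blocks_per_group + first_data_block;
}

/* Same shape as the new ext2_group_last_block_no(): the last group ends at
 * the last block of the filesystem, not at a full group's worth of blocks. */
static unsigned long group_last_block_no(unsigned long group_no)
{
	if (group_no == groups_count - 1)
		return blocks_count - 1;
	return group_first_block_no(group_no) + blocks_per_group - 1;
}

int main(void)
{
	for (unsigned long g = 0; g < groups_count; g++)
		printf("group %lu: blocks %lu..%lu\n", g,
		       group_first_block_no(g), group_last_block_no(g));
	return 0;
}

This prints group 0: blocks 1..8192, group 1: blocks 8193..16384 and group 2: blocks 16385..19999.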
@@ -701,10 +701,13 @@ static int ext2_get_blocks(struct inode *inode,
 		if (!partial) {
 			count++;
 			mutex_unlock(&ei->truncate_mutex);
-			if (err)
-				goto cleanup;
 			goto got_it;
 		}
+
+		if (err) {
+			mutex_unlock(&ei->truncate_mutex);
+			goto cleanup;
+		}
 	}
 
 	/*
@@ -145,10 +145,13 @@ setversion_out:
 		if (ei->i_block_alloc_info){
 			struct ext2_reserve_window_node *rsv = &ei->i_block_alloc_info->rsv_window_node;
 			rsv->rsv_goal_size = rsv_window_size;
+		} else {
+			ret = -ENOMEM;
 		}
+
 		mutex_unlock(&ei->truncate_mutex);
 		mnt_drop_write_file(filp);
-		return 0;
+		return ret;
 	}
 	default:
 		return -ENOTTY;
@@ -702,13 +702,7 @@ static int ext2_check_descriptors(struct super_block *sb)
 	for (i = 0; i < sbi->s_groups_count; i++) {
 		struct ext2_group_desc *gdp = ext2_get_group_desc(sb, i, NULL);
 		ext2_fsblk_t first_block = ext2_group_first_block_no(sb, i);
-		ext2_fsblk_t last_block;
-
-		if (i == sbi->s_groups_count - 1)
-			last_block = le32_to_cpu(sbi->s_es->s_blocks_count) - 1;
-		else
-			last_block = first_block +
-				(EXT2_BLOCKS_PER_GROUP(sb) - 1);
+		ext2_fsblk_t last_block = ext2_group_last_block_no(sb, i);
 
 		if (le32_to_cpu(gdp->bg_block_bitmap) < first_block ||
 		    le32_to_cpu(gdp->bg_block_bitmap) > last_block)
@@ -806,7 +800,6 @@ static unsigned long descriptor_loc(struct super_block *sb,
 {
 	struct ext2_sb_info *sbi = EXT2_SB(sb);
 	unsigned long bg, first_meta_bg;
-	int has_super = 0;
 
 	first_meta_bg = le32_to_cpu(sbi->s_es->s_first_meta_bg);
 
@@ -814,10 +807,8 @@ static unsigned long descriptor_loc(struct super_block *sb,
 	    nr < first_meta_bg)
 		return (logic_sb_block + nr + 1);
 	bg = sbi->s_desc_per_block * nr;
-	if (ext2_bg_has_super(sb, bg))
-		has_super = 1;
 
-	return ext2_group_first_block_no(sb, bg) + has_super;
+	return ext2_group_first_block_no(sb, bg) + ext2_bg_has_super(sb, bg);
 }
 
 static int ext2_fill_super(struct super_block *sb, void *data, int silent)
@@ -5834,7 +5834,7 @@ static int ext4_quota_enable(struct super_block *sb, int type, int format_id,
 	/* Don't account quota for quota files to avoid recursion */
 	qf_inode->i_flags |= S_NOQUOTA;
 	lockdep_set_quota_inode(qf_inode, I_DATA_SEM_QUOTA);
-	err = dquot_enable(qf_inode, type, format_id, flags);
+	err = dquot_load_quota_inode(qf_inode, type, format_id, flags);
 	if (err)
 		lockdep_set_quota_inode(qf_inode, I_DATA_SEM_NORMAL);
 	iput(qf_inode);
@@ -1940,7 +1940,7 @@ static int f2fs_quota_enable(struct super_block *sb, int type, int format_id,
 
 	/* Don't account quota for quota files to avoid recursion */
 	qf_inode->i_flags |= S_NOQUOTA;
-	err = dquot_enable(qf_inode, type, format_id, flags);
+	err = dquot_load_quota_inode(qf_inode, type, format_id, flags);
 	iput(qf_inode);
 	return err;
 }
@@ -728,7 +728,7 @@ static int ocfs2_release_dquot(struct dquot *dquot)
 
 	mutex_lock(&dquot->dq_lock);
 	/* Check whether we are not racing with some other dqget() */
-	if (atomic_read(&dquot->dq_count) > 1)
+	if (dquot_is_busy(dquot))
 		goto out;
 	/* Running from downconvert thread? Postpone quota processing to wq */
 	if (current == osb->dc_task) {
@@ -926,8 +926,8 @@ static int ocfs2_enable_quotas(struct ocfs2_super *osb)
 			status = -ENOENT;
 			goto out_quota_off;
 		}
-		status = dquot_enable(inode[type], type, QFMT_OCFS2,
-				      DQUOT_USAGE_ENABLED);
+		status = dquot_load_quota_inode(inode[type], type, QFMT_OCFS2,
+						DQUOT_USAGE_ENABLED);
 		if (status < 0)
 			goto out_quota_off;
 	}
fs/quota/dquot.c | 289
@@ -497,7 +497,7 @@ int dquot_release(struct dquot *dquot)
 
 	mutex_lock(&dquot->dq_lock);
 	/* Check whether we are not racing with some other dqget() */
-	if (atomic_read(&dquot->dq_count) > 1)
+	if (dquot_is_busy(dquot))
 		goto out_dqlock;
 	if (dqopt->ops[dquot->dq_id.type]->release_dqblk) {
 		ret = dqopt->ops[dquot->dq_id.type]->release_dqblk(dquot);
@@ -595,7 +595,6 @@ int dquot_scan_active(struct super_block *sb,
 		/* Now we have active dquot so we can just increase use count */
 		atomic_inc(&dquot->dq_count);
 		spin_unlock(&dq_list_lock);
-		dqstats_inc(DQST_LOOKUPS);
 		dqput(old_dquot);
 		old_dquot = dquot;
 		/*
@@ -623,7 +622,7 @@ EXPORT_SYMBOL(dquot_scan_active);
 /* Write all dquot structures to quota files */
 int dquot_writeback_dquots(struct super_block *sb, int type)
 {
-	struct list_head *dirty;
+	struct list_head dirty;
 	struct dquot *dquot;
 	struct quota_info *dqopt = sb_dqopt(sb);
 	int cnt;
@@ -637,9 +636,10 @@ int dquot_writeback_dquots(struct super_block *sb, int type)
 		if (!sb_has_quota_active(sb, cnt))
 			continue;
 		spin_lock(&dq_list_lock);
-		dirty = &dqopt->info[cnt].dqi_dirty_list;
-		while (!list_empty(dirty)) {
-			dquot = list_first_entry(dirty, struct dquot,
+		/* Move list away to avoid livelock. */
+		list_replace_init(&dqopt->info[cnt].dqi_dirty_list, &dirty);
+		while (!list_empty(&dirty)) {
+			dquot = list_first_entry(&dirty, struct dquot,
 						 dq_dirty);
 
 			WARN_ON(!test_bit(DQ_ACTIVE_B, &dquot->dq_flags));
@@ -649,7 +649,6 @@ int dquot_writeback_dquots(struct super_block *sb, int type)
 			 * use count */
 			dqgrab(dquot);
 			spin_unlock(&dq_list_lock);
-			dqstats_inc(DQST_LOOKUPS);
 			err = sb->dq_op->write_dquot(dquot);
 			if (err) {
 				/*
|
||||||
}
|
}
|
||||||
EXPORT_SYMBOL(dquot_file_open);
|
EXPORT_SYMBOL(dquot_file_open);
|
||||||
|
|
||||||
|
static void vfs_cleanup_quota_inode(struct super_block *sb, int type)
|
||||||
|
{
|
||||||
|
struct quota_info *dqopt = sb_dqopt(sb);
|
||||||
|
struct inode *inode = dqopt->files[type];
|
||||||
|
|
||||||
|
if (!inode)
|
||||||
|
return;
|
||||||
|
if (!(dqopt->flags & DQUOT_QUOTA_SYS_FILE)) {
|
||||||
|
inode_lock(inode);
|
||||||
|
inode->i_flags &= ~S_NOQUOTA;
|
||||||
|
inode_unlock(inode);
|
||||||
|
}
|
||||||
|
dqopt->files[type] = NULL;
|
||||||
|
iput(inode);
|
||||||
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Turn quota off on a device. type == -1 ==> quotaoff for all types (umount)
|
* Turn quota off on a device. type == -1 ==> quotaoff for all types (umount)
|
||||||
*/
|
*/
|
||||||
int dquot_disable(struct super_block *sb, int type, unsigned int flags)
|
int dquot_disable(struct super_block *sb, int type, unsigned int flags)
|
||||||
{
|
{
|
||||||
int cnt, ret = 0;
|
int cnt;
|
||||||
struct quota_info *dqopt = sb_dqopt(sb);
|
struct quota_info *dqopt = sb_dqopt(sb);
|
||||||
struct inode *toputinode[MAXQUOTAS];
|
|
||||||
|
|
||||||
/* s_umount should be held in exclusive mode */
|
/* s_umount should be held in exclusive mode */
|
||||||
if (WARN_ON_ONCE(down_read_trylock(&sb->s_umount)))
|
if (WARN_ON_ONCE(down_read_trylock(&sb->s_umount)))
|
||||||
|
@ -2191,7 +2205,6 @@ int dquot_disable(struct super_block *sb, int type, unsigned int flags)
|
||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
|
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
|
||||||
toputinode[cnt] = NULL;
|
|
||||||
if (type != -1 && cnt != type)
|
if (type != -1 && cnt != type)
|
||||||
continue;
|
continue;
|
||||||
if (!sb_has_quota_loaded(sb, cnt))
|
if (!sb_has_quota_loaded(sb, cnt))
|
||||||
|
@ -2211,8 +2224,7 @@ int dquot_disable(struct super_block *sb, int type, unsigned int flags)
|
||||||
dqopt->flags &= ~dquot_state_flag(
|
dqopt->flags &= ~dquot_state_flag(
|
||||||
DQUOT_SUSPENDED, cnt);
|
DQUOT_SUSPENDED, cnt);
|
||||||
spin_unlock(&dq_state_lock);
|
spin_unlock(&dq_state_lock);
|
||||||
iput(dqopt->files[cnt]);
|
vfs_cleanup_quota_inode(sb, cnt);
|
||||||
dqopt->files[cnt] = NULL;
|
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
spin_unlock(&dq_state_lock);
|
spin_unlock(&dq_state_lock);
|
||||||
|
@ -2234,10 +2246,6 @@ int dquot_disable(struct super_block *sb, int type, unsigned int flags)
|
||||||
if (dqopt->ops[cnt]->free_file_info)
|
if (dqopt->ops[cnt]->free_file_info)
|
||||||
dqopt->ops[cnt]->free_file_info(sb, cnt);
|
dqopt->ops[cnt]->free_file_info(sb, cnt);
|
||||||
put_quota_format(dqopt->info[cnt].dqi_format);
|
put_quota_format(dqopt->info[cnt].dqi_format);
|
||||||
|
|
||||||
toputinode[cnt] = dqopt->files[cnt];
|
|
||||||
if (!sb_has_quota_loaded(sb, cnt))
|
|
||||||
dqopt->files[cnt] = NULL;
|
|
||||||
dqopt->info[cnt].dqi_flags = 0;
|
dqopt->info[cnt].dqi_flags = 0;
|
||||||
dqopt->info[cnt].dqi_igrace = 0;
|
dqopt->info[cnt].dqi_igrace = 0;
|
||||||
dqopt->info[cnt].dqi_bgrace = 0;
|
dqopt->info[cnt].dqi_bgrace = 0;
|
||||||
|
@@ -2259,32 +2267,22 @@ int dquot_disable(struct super_block *sb, int type, unsigned int flags)
 	 * must also discard the blockdev buffers so that we see the
 	 * changes done by userspace on the next quotaon() */
 	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
-		/* This can happen when suspending quotas on remount-ro... */
-		if (toputinode[cnt] && !sb_has_quota_loaded(sb, cnt)) {
-			inode_lock(toputinode[cnt]);
-			toputinode[cnt]->i_flags &= ~S_NOQUOTA;
-			truncate_inode_pages(&toputinode[cnt]->i_data, 0);
-			inode_unlock(toputinode[cnt]);
-			mark_inode_dirty_sync(toputinode[cnt]);
+		if (!sb_has_quota_loaded(sb, cnt) && dqopt->files[cnt]) {
+			inode_lock(dqopt->files[cnt]);
+			truncate_inode_pages(&dqopt->files[cnt]->i_data, 0);
+			inode_unlock(dqopt->files[cnt]);
 		}
 	if (sb->s_bdev)
 		invalidate_bdev(sb->s_bdev);
 put_inodes:
+	/* We are done when suspending quotas */
+	if (flags & DQUOT_SUSPENDED)
+		return 0;
+
 	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
-		if (toputinode[cnt]) {
-			/* On remount RO, we keep the inode pointer so that we
-			 * can reenable quota on the subsequent remount RW. We
-			 * have to check 'flags' variable and not use sb_has_
-			 * function because another quotaon / quotaoff could
-			 * change global state before we got here. We refuse
-			 * to suspend quotas when there is pending delete on
-			 * the quota file... */
-			if (!(flags & DQUOT_SUSPENDED))
-				iput(toputinode[cnt]);
-			else if (!toputinode[cnt]->i_nlink)
-				ret = -EBUSY;
-		}
-	return ret;
+		if (!sb_has_quota_loaded(sb, cnt))
+			vfs_cleanup_quota_inode(sb, cnt);
+	return 0;
 }
 EXPORT_SYMBOL(dquot_disable);
 
@@ -2299,28 +2297,52 @@ EXPORT_SYMBOL(dquot_quota_off);
  *	Turn quotas on on a device
  */
 
-/*
- * Helper function to turn quotas on when we already have the inode of
- * quota file and no quota information is loaded.
- */
-static int vfs_load_quota_inode(struct inode *inode, int type, int format_id,
+static int vfs_setup_quota_inode(struct inode *inode, int type)
+{
+	struct super_block *sb = inode->i_sb;
+	struct quota_info *dqopt = sb_dqopt(sb);
+
+	if (!S_ISREG(inode->i_mode))
+		return -EACCES;
+	if (IS_RDONLY(inode))
+		return -EROFS;
+	if (sb_has_quota_loaded(sb, type))
+		return -EBUSY;
+
+	dqopt->files[type] = igrab(inode);
+	if (!dqopt->files[type])
+		return -EIO;
+	if (!(dqopt->flags & DQUOT_QUOTA_SYS_FILE)) {
+		/* We don't want quota and atime on quota files (deadlocks
+		 * possible) Also nobody should write to the file - we use
+		 * special IO operations which ignore the immutable bit. */
+		inode_lock(inode);
+		inode->i_flags |= S_NOQUOTA;
+		inode_unlock(inode);
+		/*
+		 * When S_NOQUOTA is set, remove dquot references as no more
+		 * references can be added
+		 */
+		__dquot_drop(inode);
+	}
+	return 0;
+}
+
+int dquot_load_quota_sb(struct super_block *sb, int type, int format_id,
 	unsigned int flags)
 {
 	struct quota_format_type *fmt = find_quota_format(format_id);
-	struct super_block *sb = inode->i_sb;
 	struct quota_info *dqopt = sb_dqopt(sb);
 	int error;
 
+	/* Just unsuspend quotas? */
+	BUG_ON(flags & DQUOT_SUSPENDED);
+	/* s_umount should be held in exclusive mode */
+	if (WARN_ON_ONCE(down_read_trylock(&sb->s_umount)))
+		up_read(&sb->s_umount);
+
 	if (!fmt)
 		return -ESRCH;
-	if (!S_ISREG(inode->i_mode)) {
-		error = -EACCES;
-		goto out_fmt;
-	}
-	if (IS_RDONLY(inode)) {
-		error = -EROFS;
-		goto out_fmt;
-	}
 	if (!sb->s_op->quota_write || !sb->s_op->quota_read ||
 	    (type == PRJQUOTA && sb->dq_op->get_projid == NULL)) {
 		error = -EINVAL;
|
||||||
invalidate_bdev(sb->s_bdev);
|
invalidate_bdev(sb->s_bdev);
|
||||||
}
|
}
|
||||||
|
|
||||||
if (!(dqopt->flags & DQUOT_QUOTA_SYS_FILE)) {
|
|
||||||
/* We don't want quota and atime on quota files (deadlocks
|
|
||||||
* possible) Also nobody should write to the file - we use
|
|
||||||
* special IO operations which ignore the immutable bit. */
|
|
||||||
inode_lock(inode);
|
|
||||||
inode->i_flags |= S_NOQUOTA;
|
|
||||||
inode_unlock(inode);
|
|
||||||
/*
|
|
||||||
* When S_NOQUOTA is set, remove dquot references as no more
|
|
||||||
* references can be added
|
|
||||||
*/
|
|
||||||
__dquot_drop(inode);
|
|
||||||
}
|
|
||||||
|
|
||||||
error = -EIO;
|
|
||||||
dqopt->files[type] = igrab(inode);
|
|
||||||
if (!dqopt->files[type])
|
|
||||||
goto out_file_flags;
|
|
||||||
error = -EINVAL;
|
error = -EINVAL;
|
||||||
if (!fmt->qf_ops->check_quota_file(sb, type))
|
if (!fmt->qf_ops->check_quota_file(sb, type))
|
||||||
goto out_file_init;
|
goto out_fmt;
|
||||||
|
|
||||||
dqopt->ops[type] = fmt->qf_ops;
|
dqopt->ops[type] = fmt->qf_ops;
|
||||||
dqopt->info[type].dqi_format = fmt;
|
dqopt->info[type].dqi_format = fmt;
|
||||||
|
@ -2380,7 +2384,7 @@ static int vfs_load_quota_inode(struct inode *inode, int type, int format_id,
|
||||||
INIT_LIST_HEAD(&dqopt->info[type].dqi_dirty_list);
|
INIT_LIST_HEAD(&dqopt->info[type].dqi_dirty_list);
|
||||||
error = dqopt->ops[type]->read_file_info(sb, type);
|
error = dqopt->ops[type]->read_file_info(sb, type);
|
||||||
if (error < 0)
|
if (error < 0)
|
||||||
goto out_file_init;
|
goto out_fmt;
|
||||||
if (dqopt->flags & DQUOT_QUOTA_SYS_FILE) {
|
if (dqopt->flags & DQUOT_QUOTA_SYS_FILE) {
|
||||||
spin_lock(&dq_data_lock);
|
spin_lock(&dq_data_lock);
|
||||||
dqopt->info[type].dqi_flags |= DQF_SYS_FILE;
|
dqopt->info[type].dqi_flags |= DQF_SYS_FILE;
|
||||||
|
@ -2395,24 +2399,36 @@ static int vfs_load_quota_inode(struct inode *inode, int type, int format_id,
|
||||||
dquot_disable(sb, type, flags);
|
dquot_disable(sb, type, flags);
|
||||||
|
|
||||||
return error;
|
return error;
|
||||||
out_file_init:
|
|
||||||
dqopt->files[type] = NULL;
|
|
||||||
iput(inode);
|
|
||||||
out_file_flags:
|
|
||||||
inode_lock(inode);
|
|
||||||
inode->i_flags &= ~S_NOQUOTA;
|
|
||||||
inode_unlock(inode);
|
|
||||||
out_fmt:
|
out_fmt:
|
||||||
put_quota_format(fmt);
|
put_quota_format(fmt);
|
||||||
|
|
||||||
return error;
|
return error;
|
||||||
}
|
}
|
||||||
|
EXPORT_SYMBOL(dquot_load_quota_sb);
|
||||||
|
|
||||||
|
/*
|
||||||
|
* More powerful function for turning on quotas on given quota inode allowing
|
||||||
|
* setting of individual quota flags
|
||||||
|
*/
|
||||||
|
int dquot_load_quota_inode(struct inode *inode, int type, int format_id,
|
||||||
|
unsigned int flags)
|
||||||
|
{
|
||||||
|
int err;
|
||||||
|
|
||||||
|
err = vfs_setup_quota_inode(inode, type);
|
||||||
|
if (err < 0)
|
||||||
|
return err;
|
||||||
|
err = dquot_load_quota_sb(inode->i_sb, type, format_id, flags);
|
||||||
|
if (err < 0)
|
||||||
|
vfs_cleanup_quota_inode(inode->i_sb, type);
|
||||||
|
return err;
|
||||||
|
}
|
||||||
|
EXPORT_SYMBOL(dquot_load_quota_inode);
|
||||||
|
|
||||||
/* Reenable quotas on remount RW */
|
/* Reenable quotas on remount RW */
|
||||||
int dquot_resume(struct super_block *sb, int type)
|
int dquot_resume(struct super_block *sb, int type)
|
||||||
{
|
{
|
||||||
struct quota_info *dqopt = sb_dqopt(sb);
|
struct quota_info *dqopt = sb_dqopt(sb);
|
||||||
struct inode *inode;
|
|
||||||
int ret = 0, cnt;
|
int ret = 0, cnt;
|
||||||
unsigned int flags;
|
unsigned int flags;
|
||||||
|
|
||||||
|
@@ -2426,8 +2442,6 @@ int dquot_resume(struct super_block *sb, int type)
 		if (!sb_has_quota_suspended(sb, cnt))
 			continue;
 
-		inode = dqopt->files[cnt];
-		dqopt->files[cnt] = NULL;
 		spin_lock(&dq_state_lock);
 		flags = dqopt->flags & dquot_state_flag(DQUOT_USAGE_ENABLED |
 							DQUOT_LIMITS_ENABLED,
@@ -2436,9 +2450,10 @@ int dquot_resume(struct super_block *sb, int type)
 		spin_unlock(&dq_state_lock);
 
 		flags = dquot_generic_flag(flags, cnt);
-		ret = vfs_load_quota_inode(inode, cnt,
-				dqopt->info[cnt].dqi_fmt_id, flags);
-		iput(inode);
+		ret = dquot_load_quota_sb(sb, cnt, dqopt->info[cnt].dqi_fmt_id,
+					  flags);
+		if (ret < 0)
+			vfs_cleanup_quota_inode(sb, type);
 	}
 
 	return ret;
@@ -2455,48 +2470,13 @@ int dquot_quota_on(struct super_block *sb, int type, int format_id,
 	if (path->dentry->d_sb != sb)
 		error = -EXDEV;
 	else
-		error = vfs_load_quota_inode(d_inode(path->dentry), type,
+		error = dquot_load_quota_inode(d_inode(path->dentry), type,
 					     format_id, DQUOT_USAGE_ENABLED |
 					     DQUOT_LIMITS_ENABLED);
 	return error;
 }
 EXPORT_SYMBOL(dquot_quota_on);
 
-/*
- * More powerful function for turning on quotas allowing setting
- * of individual quota flags
- */
-int dquot_enable(struct inode *inode, int type, int format_id,
-	unsigned int flags)
-{
-	struct super_block *sb = inode->i_sb;
-
-	/* Just unsuspend quotas? */
-	BUG_ON(flags & DQUOT_SUSPENDED);
-	/* s_umount should be held in exclusive mode */
-	if (WARN_ON_ONCE(down_read_trylock(&sb->s_umount)))
-		up_read(&sb->s_umount);
-
-	if (!flags)
-		return 0;
-	/* Just updating flags needed? */
-	if (sb_has_quota_loaded(sb, type)) {
-		if (flags & DQUOT_USAGE_ENABLED &&
-		    sb_has_quota_usage_enabled(sb, type))
-			return -EBUSY;
-		if (flags & DQUOT_LIMITS_ENABLED &&
-		    sb_has_quota_limits_enabled(sb, type))
-			return -EBUSY;
-		spin_lock(&dq_state_lock);
-		sb_dqopt(sb)->flags |= dquot_state_flag(flags, type);
-		spin_unlock(&dq_state_lock);
-		return 0;
-	}
-
-	return vfs_load_quota_inode(inode, type, format_id, flags);
-}
-EXPORT_SYMBOL(dquot_enable);
-
 /*
  * This function is used when filesystem needs to initialize quotas
  * during mount time.
@@ -2518,7 +2498,7 @@ int dquot_quota_on_mount(struct super_block *sb, char *qf_name,
 
 	error = security_quota_on(dentry);
 	if (!error)
-		error = vfs_load_quota_inode(d_inode(dentry), type, format_id,
+		error = dquot_load_quota_inode(d_inode(dentry), type, format_id,
 					DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED);
 
 out:
|
||||||
if (!(flags & qtype_enforce_flag(type)))
|
if (!(flags & qtype_enforce_flag(type)))
|
||||||
continue;
|
continue;
|
||||||
/* Can't enforce without accounting */
|
/* Can't enforce without accounting */
|
||||||
if (!sb_has_quota_usage_enabled(sb, type))
|
if (!sb_has_quota_usage_enabled(sb, type)) {
|
||||||
return -EINVAL;
|
ret = -EINVAL;
|
||||||
ret = dquot_enable(dqopt->files[type], type,
|
|
||||||
dqopt->info[type].dqi_fmt_id,
|
|
||||||
DQUOT_LIMITS_ENABLED);
|
|
||||||
if (ret < 0)
|
|
||||||
goto out_err;
|
goto out_err;
|
||||||
|
}
|
||||||
|
if (sb_has_quota_limits_enabled(sb, type)) {
|
||||||
|
ret = -EBUSY;
|
||||||
|
goto out_err;
|
||||||
|
}
|
||||||
|
spin_lock(&dq_state_lock);
|
||||||
|
dqopt->flags |= dquot_state_flag(DQUOT_LIMITS_ENABLED, type);
|
||||||
|
spin_unlock(&dq_state_lock);
|
||||||
}
|
}
|
||||||
return 0;
|
return 0;
|
||||||
out_err:
|
out_err:
|
||||||
|
@ -2599,10 +2583,12 @@ static int dquot_quota_disable(struct super_block *sb, unsigned int flags)
|
||||||
out_err:
|
out_err:
|
||||||
/* Backout enforcement disabling we already did */
|
/* Backout enforcement disabling we already did */
|
||||||
for (type--; type >= 0; type--) {
|
for (type--; type >= 0; type--) {
|
||||||
if (flags & qtype_enforce_flag(type))
|
if (flags & qtype_enforce_flag(type)) {
|
||||||
dquot_enable(dqopt->files[type], type,
|
spin_lock(&dq_state_lock);
|
||||||
dqopt->info[type].dqi_fmt_id,
|
dqopt->flags |=
|
||||||
DQUOT_LIMITS_ENABLED);
|
dquot_state_flag(DQUOT_LIMITS_ENABLED, type);
|
||||||
|
spin_unlock(&dq_state_lock);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
@ -2800,8 +2786,10 @@ int dquot_get_state(struct super_block *sb, struct qc_state *state)
|
||||||
tstate->flags |= QCI_LIMITS_ENFORCED;
|
tstate->flags |= QCI_LIMITS_ENFORCED;
|
||||||
tstate->spc_timelimit = mi->dqi_bgrace;
|
tstate->spc_timelimit = mi->dqi_bgrace;
|
||||||
tstate->ino_timelimit = mi->dqi_igrace;
|
tstate->ino_timelimit = mi->dqi_igrace;
|
||||||
tstate->ino = dqopt->files[type]->i_ino;
|
if (dqopt->files[type]) {
|
||||||
tstate->blocks = dqopt->files[type]->i_blocks;
|
tstate->ino = dqopt->files[type]->i_ino;
|
||||||
|
tstate->blocks = dqopt->files[type]->i_blocks;
|
||||||
|
}
|
||||||
tstate->nextents = 1; /* We don't know... */
|
tstate->nextents = 1; /* We don't know... */
|
||||||
spin_unlock(&dq_data_lock);
|
spin_unlock(&dq_data_lock);
|
||||||
}
|
}
|
||||||
|
@@ -2860,68 +2848,73 @@ EXPORT_SYMBOL(dquot_quotactl_sysfile_ops);
 static int do_proc_dqstats(struct ctl_table *table, int write,
 		     void __user *buffer, size_t *lenp, loff_t *ppos)
 {
-	unsigned int type = (int *)table->data - dqstats.stat;
+	unsigned int type = (unsigned long *)table->data - dqstats.stat;
+	s64 value = percpu_counter_sum(&dqstats.counter[type]);
+
+	/* Filter negative values for non-monotonic counters */
+	if (value < 0 && (type == DQST_ALLOC_DQUOTS ||
+			  type == DQST_FREE_DQUOTS))
+		value = 0;
 
 	/* Update global table */
-	dqstats.stat[type] =
-		percpu_counter_sum_positive(&dqstats.counter[type]);
-	return proc_dointvec(table, write, buffer, lenp, ppos);
+	dqstats.stat[type] = value;
+	return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
 }
 
 static struct ctl_table fs_dqstats_table[] = {
 	{
 		.procname	= "lookups",
 		.data		= &dqstats.stat[DQST_LOOKUPS],
-		.maxlen		= sizeof(int),
+		.maxlen		= sizeof(unsigned long),
 		.mode		= 0444,
 		.proc_handler	= do_proc_dqstats,
 	},
 	{
 		.procname	= "drops",
 		.data		= &dqstats.stat[DQST_DROPS],
-		.maxlen		= sizeof(int),
+		.maxlen		= sizeof(unsigned long),
 		.mode		= 0444,
 		.proc_handler	= do_proc_dqstats,
 	},
 	{
 		.procname	= "reads",
 		.data		= &dqstats.stat[DQST_READS],
-		.maxlen		= sizeof(int),
+		.maxlen		= sizeof(unsigned long),
 		.mode		= 0444,
 		.proc_handler	= do_proc_dqstats,
 	},
 	{
 		.procname	= "writes",
 		.data		= &dqstats.stat[DQST_WRITES],
-		.maxlen		= sizeof(int),
+		.maxlen		= sizeof(unsigned long),
 		.mode		= 0444,
 		.proc_handler	= do_proc_dqstats,
 	},
 	{
 		.procname	= "cache_hits",
 		.data		= &dqstats.stat[DQST_CACHE_HITS],
-		.maxlen		= sizeof(int),
+		.maxlen		= sizeof(unsigned long),
 		.mode		= 0444,
 		.proc_handler	= do_proc_dqstats,
 	},
 	{
 		.procname	= "allocated_dquots",
 		.data		= &dqstats.stat[DQST_ALLOC_DQUOTS],
-		.maxlen		= sizeof(int),
+		.maxlen		= sizeof(unsigned long),
 		.mode		= 0444,
 		.proc_handler	= do_proc_dqstats,
 	},
 	{
 		.procname	= "free_dquots",
 		.data		= &dqstats.stat[DQST_FREE_DQUOTS],
-		.maxlen		= sizeof(int),
+		.maxlen		= sizeof(unsigned long),
 		.mode		= 0444,
 		.proc_handler	= do_proc_dqstats,
 	},
 	{
 		.procname	= "syncs",
 		.data		= &dqstats.stat[DQST_SYNCS],
-		.maxlen		= sizeof(int),
+		.maxlen		= sizeof(unsigned long),
 		.mode		= 0444,
 		.proc_handler	= do_proc_dqstats,
 	},
@@ -2983,11 +2976,7 @@ static int __init dquot_init(void)
 
 	/* Find power-of-two hlist_heads which can fit into allocation */
 	nr_hash = (1UL << order) * PAGE_SIZE / sizeof(struct hlist_head);
-	dq_hash_bits = 0;
-	do {
-		dq_hash_bits++;
-	} while (nr_hash >> dq_hash_bits);
-	dq_hash_bits--;
+	dq_hash_bits = ilog2(nr_hash);
 
 	nr_hash = 1UL << dq_hash_bits;
 	dq_hash_mask = nr_hash - 1;
@@ -60,8 +60,6 @@ static int quota_sync_all(int type)
 {
 	int ret;
 
-	if (type >= MAXQUOTAS)
-		return -EINVAL;
 	ret = security_quotactl(Q_SYNC, type, 0, NULL);
 	if (!ret)
 		iterate_supers(quota_sync_one, &type);
@@ -686,8 +684,6 @@ static int do_quotactl(struct super_block *sb, int type, int cmd, qid_t id,
 {
 	int ret;
 
-	if (type >= MAXQUOTAS)
-		return -EINVAL;
 	type = array_index_nospec(type, MAXQUOTAS);
 	/*
 	 * Quota not supported on this fs? Check this before s_quota_types
@@ -831,6 +827,9 @@ int kernel_quotactl(unsigned int cmd, const char __user *special,
 	cmds = cmd >> SUBCMDSHIFT;
 	type = cmd & SUBCMDMASK;
 
+	if (type >= MAXQUOTAS)
+		return -EINVAL;
+
 	/*
 	 * As a special case Q_SYNC can be called without a specific device.
 	 * It will iterate all superblocks that have quota enabled and call
@@ -217,7 +217,6 @@ static const struct quota_format_ops v1_format_ops = {
 	.check_quota_file	= v1_check_quota_file,
 	.read_file_info		= v1_read_file_info,
 	.write_file_info	= v1_write_file_info,
-	.free_file_info		= NULL,
 	.read_dqblk		= v1_read_dqblk,
 	.commit_dqblk		= v1_commit_dqblk,
 };
@@ -38,16 +38,10 @@ static int reiserfs_file_release(struct inode *inode, struct file *filp)
 
 	BUG_ON(!S_ISREG(inode->i_mode));
 
-	if (atomic_add_unless(&REISERFS_I(inode)->openers, -1, 1))
+	if (!atomic_dec_and_mutex_lock(&REISERFS_I(inode)->openers,
+				       &REISERFS_I(inode)->tailpack))
 		return 0;
 
-	mutex_lock(&REISERFS_I(inode)->tailpack);
-
-	if (!atomic_dec_and_test(&REISERFS_I(inode)->openers)) {
-		mutex_unlock(&REISERFS_I(inode)->tailpack);
-		return 0;
-	}
-
 	/* fast out for when nothing needs to be done */
 	if ((!(REISERFS_I(inode)->i_flags & i_pack_on_close_mask) ||
 	     !tail_has_to_be_packed(inode)) &&
@@ -2097,6 +2097,15 @@ int reiserfs_new_inode(struct reiserfs_transaction_handle *th,
 		goto out_inserted_sd;
 	}
 
+	/*
+	 * Mark it private if we're creating the privroot
+	 * or something under it.
+	 */
+	if (IS_PRIVATE(dir) || dentry == REISERFS_SB(sb)->priv_root) {
+		inode->i_flags |= S_PRIVATE;
+		inode->i_opflags &= ~IOP_XATTR;
+	}
+
 	if (reiserfs_posixacl(inode->i_sb)) {
 		reiserfs_write_unlock(inode->i_sb);
 		retval = reiserfs_inherit_default_acl(th, dir, dentry, inode);
@@ -2111,8 +2120,7 @@ int reiserfs_new_inode(struct reiserfs_transaction_handle *th,
 			reiserfs_warning(inode->i_sb, "jdm-13090",
 					 "ACLs aren't enabled in the fs, "
 					 "but vfs thinks they are!");
-	} else if (IS_PRIVATE(dir))
-		inode->i_flags |= S_PRIVATE;
+	}
 
 	if (security->name) {
 		reiserfs_write_unlock(inode->i_sb);
@@ -377,10 +377,13 @@ static struct dentry *reiserfs_lookup(struct inode *dir, struct dentry *dentry,
 
 		/*
 		 * Propagate the private flag so we know we're
-		 * in the priv tree
+		 * in the priv tree. Also clear IOP_XATTR
+		 * since we don't have xattrs on xattr files.
 		 */
-		if (IS_PRIVATE(dir))
+		if (IS_PRIVATE(dir)) {
 			inode->i_flags |= S_PRIVATE;
+			inode->i_opflags &= ~IOP_XATTR;
+		}
 	}
 	reiserfs_write_unlock(dir->i_sb);
 	if (retval == IO_ERROR) {
@@ -1168,6 +1168,8 @@ static inline int bmap_would_wrap(unsigned bmap_nr)
 	return bmap_nr > ((1LL << 16) - 1);
 }
 
+extern const struct xattr_handler *reiserfs_xattr_handlers[];
+
 /*
  * this says about version of key of all items (but stat data) the
  * object consists of
@@ -2049,6 +2049,8 @@ static int reiserfs_fill_super(struct super_block *s, void *data, int silent)
 	if (replay_only(s))
 		goto error_unlocked;
 
+	s->s_xattr = reiserfs_xattr_handlers;
+
 	if (bdev_read_only(s->s_bdev) && !sb_rdonly(s)) {
 		SWARN(silent, s, "clm-7000",
 		      "Detected readonly device, marking FS readonly");
@@ -122,13 +122,13 @@ static struct dentry *open_xa_root(struct super_block *sb, int flags)
 	struct dentry *xaroot;
 
 	if (d_really_is_negative(privroot))
-		return ERR_PTR(-ENODATA);
+		return ERR_PTR(-EOPNOTSUPP);
 
 	inode_lock_nested(d_inode(privroot), I_MUTEX_XATTR);
 
 	xaroot = dget(REISERFS_SB(sb)->xattr_root);
 	if (!xaroot)
-		xaroot = ERR_PTR(-ENODATA);
+		xaroot = ERR_PTR(-EOPNOTSUPP);
 	else if (d_really_is_negative(xaroot)) {
 		int err = -ENODATA;
 
@@ -619,6 +619,10 @@ int reiserfs_xattr_set(struct inode *inode, const char *name,
 	int error, error2;
 	size_t jbegin_count = reiserfs_xattr_nblocks(inode, buffer_size);
 
+	/* Check before we start a transaction and then do nothing. */
+	if (!d_really_is_positive(REISERFS_SB(inode->i_sb)->priv_root))
+		return -EOPNOTSUPP;
+
 	if (!(flags & XATTR_REPLACE))
 		jbegin_count += reiserfs_xattr_jcreate_nblocks(inode);
 
@@ -841,8 +845,7 @@ ssize_t reiserfs_listxattr(struct dentry * dentry, char *buffer, size_t size)
 	if (d_really_is_negative(dentry))
 		return -EINVAL;
 
-	if (!dentry->d_sb->s_xattr ||
-	    get_inode_sd_version(d_inode(dentry)) == STAT_DATA_V1)
+	if (get_inode_sd_version(d_inode(dentry)) == STAT_DATA_V1)
 		return -EOPNOTSUPP;
 
 	dir = open_xa_dir(d_inode(dentry), XATTR_REPLACE);
@@ -882,6 +885,7 @@ static int create_privroot(struct dentry *dentry)
 	}
 
 	d_inode(dentry)->i_flags |= S_PRIVATE;
+	d_inode(dentry)->i_opflags &= ~IOP_XATTR;
 	reiserfs_info(dentry->d_sb, "Created %s - reserved for xattr "
 		      "storage.\n", PRIVROOT_NAME);
 
@@ -895,7 +899,7 @@ static int create_privroot(struct dentry *dentry) { return 0; }
 #endif
 
 /* Actual operations that are exported to VFS-land */
-static const struct xattr_handler *reiserfs_xattr_handlers[] = {
+const struct xattr_handler *reiserfs_xattr_handlers[] = {
 #ifdef CONFIG_REISERFS_FS_XATTR
 	&reiserfs_xattr_user_handler,
 	&reiserfs_xattr_trusted_handler,
@@ -966,8 +970,10 @@ int reiserfs_lookup_privroot(struct super_block *s)
 	if (!IS_ERR(dentry)) {
 		REISERFS_SB(s)->priv_root = dentry;
 		d_set_d_op(dentry, &xattr_lookup_poison_ops);
-		if (d_really_is_positive(dentry))
+		if (d_really_is_positive(dentry)) {
 			d_inode(dentry)->i_flags |= S_PRIVATE;
+			d_inode(dentry)->i_opflags &= ~IOP_XATTR;
+		}
 	} else
 		err = PTR_ERR(dentry);
 	inode_unlock(d_inode(s->s_root));
@@ -996,7 +1002,6 @@ int reiserfs_xattr_init(struct super_block *s, int mount_flags)
 	}
 
 	if (d_really_is_positive(privroot)) {
-		s->s_xattr = reiserfs_xattr_handlers;
 		inode_lock(d_inode(privroot));
 		if (!REISERFS_SB(s)->xattr_root) {
 			struct dentry *dentry;
@@ -320,10 +320,8 @@ reiserfs_inherit_default_acl(struct reiserfs_transaction_handle *th,
 	 * would be useless since permissions are ignored, and a pain because
 	 * it introduces locking cycles
 	 */
-	if (IS_PRIVATE(dir)) {
-		inode->i_flags |= S_PRIVATE;
+	if (IS_PRIVATE(inode))
 		goto apply_umask;
-	}
 
 	err = posix_acl_create(dir, &inode->i_mode, &default_acl, &acl);
 	if (err)
@@ -263,7 +263,7 @@ enum {
 };
 
 struct dqstats {
-	int stat[_DQST_DQSTAT_LAST];
+	unsigned long stat[_DQST_DQSTAT_LAST];
 	struct percpu_counter counter[_DQST_DQSTAT_LAST];
 };
 
@@ -54,6 +54,16 @@ static inline struct dquot *dqgrab(struct dquot *dquot)
 	atomic_inc(&dquot->dq_count);
 	return dquot;
 }
+
+static inline bool dquot_is_busy(struct dquot *dquot)
+{
+	if (test_bit(DQ_MOD_B, &dquot->dq_flags))
+		return true;
+	if (atomic_read(&dquot->dq_count) > 1)
+		return true;
+	return false;
+}
+
 void dqput(struct dquot *dquot);
 int dquot_scan_active(struct super_block *sb,
 		int (*fn)(struct dquot *dquot, unsigned long priv),
@@ -87,7 +97,9 @@ int dquot_mark_dquot_dirty(struct dquot *dquot);
 
 int dquot_file_open(struct inode *inode, struct file *file);
 
-int dquot_enable(struct inode *inode, int type, int format_id,
+int dquot_load_quota_sb(struct super_block *sb, int type, int format_id,
+	unsigned int flags);
+int dquot_load_quota_inode(struct inode *inode, int type, int format_id,
 	unsigned int flags);
 int dquot_quota_on(struct super_block *sb, int type, int format_id,
 	const struct path *path);