From a258edbca2d94a4d902daeecb3073c88b57ac7ad Mon Sep 17 00:00:00 2001 From: Jon Derrick Date: Fri, 10 Jan 2020 14:56:46 -0700 Subject: [PATCH 01/23] MAINTAINERS: Add Revanth Rajashekar as a SED-Opal maintainer Scott hasn't worked for Intel for some time and has already given us his blessing. CC: Scott Bauer Signed-off-by: Revanth Rajashekar Signed-off-by: Jon Derrick Signed-off-by: Jens Axboe --- MAINTAINERS | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/MAINTAINERS b/MAINTAINERS index e48ab79879ac..a704cb9316e2 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -14916,8 +14916,8 @@ S: Maintained F: drivers/mmc/host/sdhci-omap.c SECURE ENCRYPTING DEVICE (SED) OPAL DRIVER -M: Scott Bauer M: Jonathan Derrick +M: Revanth Rajashekar L: linux-block@vger.kernel.org S: Supported F: block/sed* From 6a365874a43c43b227492266f59cd68ecc5a6f83 Mon Sep 17 00:00:00 2001 From: Stephen Kitt Date: Fri, 24 Jan 2020 21:03:07 +0100 Subject: [PATCH 02/23] drbd: fifo_alloc() should use struct_size Switching to struct_size for the allocation in fifo_alloc avoids hard-coding the type of fifo_buffer.values in fifo_alloc. It also provides overflow protection; to avoid pessimistic code being generated by the compiler as a result, this patch also switches fifo_size to unsigned, propagating the change as appropriate. Reviewed-by: Gustavo A. R. Silva Signed-off-by: Stephen Kitt Signed-off-by: Jens Axboe --- drivers/block/drbd/drbd_int.h | 2 +- drivers/block/drbd/drbd_nl.c | 3 ++- drivers/block/drbd/drbd_receiver.c | 2 +- drivers/block/drbd/drbd_worker.c | 4 ++-- 4 files changed, 6 insertions(+), 5 deletions(-) diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h index ddbf56014c51..aae99a2d7bd4 100644 --- a/drivers/block/drbd/drbd_int.h +++ b/drivers/block/drbd/drbd_int.h @@ -622,7 +622,7 @@ struct fifo_buffer { int total; /* sum of all values */ int values[0]; }; -extern struct fifo_buffer *fifo_alloc(int fifo_size); +extern struct fifo_buffer *fifo_alloc(unsigned int fifo_size); /* flag bits per connection */ enum { diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c index de2f94d0103a..da4a3ebe04ef 100644 --- a/drivers/block/drbd/drbd_nl.c +++ b/drivers/block/drbd/drbd_nl.c @@ -1575,7 +1575,8 @@ int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info) struct drbd_device *device; struct disk_conf *new_disk_conf, *old_disk_conf; struct fifo_buffer *old_plan = NULL, *new_plan = NULL; - int err, fifo_size; + int err; + unsigned int fifo_size; retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR); if (!adm_ctx.reply_skb) diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c index 2b3103c30857..79e216446030 100644 --- a/drivers/block/drbd/drbd_receiver.c +++ b/drivers/block/drbd/drbd_receiver.c @@ -3887,7 +3887,7 @@ static int receive_SyncParam(struct drbd_connection *connection, struct packet_i struct disk_conf *old_disk_conf = NULL, *new_disk_conf = NULL; const int apv = connection->agreed_pro_version; struct fifo_buffer *old_plan = NULL, *new_plan = NULL; - int fifo_size = 0; + unsigned int fifo_size = 0; int err; peer_device = conn_peer_device(connection, pi->vnr); diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c index 5bdcc70ad589..b7f605c6e231 100644 --- a/drivers/block/drbd/drbd_worker.c +++ b/drivers/block/drbd/drbd_worker.c @@ -482,11 +482,11 @@ static void fifo_add_val(struct fifo_buffer *fb, int value) fb->values[i] += value; } -struct fifo_buffer *fifo_alloc(int 
fifo_size)
+struct fifo_buffer *fifo_alloc(unsigned int fifo_size)
 {
 	struct fifo_buffer *fb;
 
-	fb = kzalloc(sizeof(struct fifo_buffer) + sizeof(int) * fifo_size, GFP_NOIO);
+	fb = kzalloc(struct_size(fb, values, fifo_size), GFP_NOIO);
 	if (!fb)
 		return NULL;
 

From 5c0dd228b5fc30a3b732c7ae2657e0161ec7ed80 Mon Sep 17 00:00:00 2001
From: Sun Ke
Date: Wed, 22 Jan 2020 11:18:57 +0800
Subject: [PATCH 03/23] nbd: add a flush_workqueue in nbd_start_device

When kzalloc fails, we may end up trying to destroy the workqueue from
inside the workqueue.

If num_connections is m (2 < m) and the first n (1 < n < m) kzalloc
calls succeed but the (n + 1)-th fails, nbd_start_device will return
-ENOMEM to nbd_start_device_ioctl, and nbd_start_device_ioctl will
return immediately without running flush_workqueue. However, we still
have n recv threads. If nbd_release runs first, the recv threads may
have to drop the last config_refs and try to destroy the workqueue
from inside the workqueue.

To fix it, add a flush_workqueue in nbd_start_device.

Fixes: e9e006f5fcf2 ("nbd: fix max number of supported devs")
Signed-off-by: Sun Ke
Signed-off-by: Jens Axboe
---
 drivers/block/nbd.c | 10 ++++++++++
 1 file changed, 10 insertions(+)

diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index b4607dd96185..78181908f0df 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -1265,6 +1265,16 @@ static int nbd_start_device(struct nbd_device *nbd)
 		args = kzalloc(sizeof(*args), GFP_KERNEL);
 		if (!args) {
 			sock_shutdown(nbd);
+			/*
+			 * If num_connections is m (2 < m) and the first n
+			 * (1 < n < m) kzallocs succeeded but the (n + 1)-th
+			 * failed, we still have n recv threads. Flush the
+			 * workqueue here so those threads cannot drop the
+			 * last config_refs and end up destroying the
+			 * workqueue from inside the workqueue.
+			 */
+			if (i)
+				flush_workqueue(nbd->recv_workq);
 			return -ENOMEM;
 		}
 		sk_set_memalloc(config->socks[i]->sock->sk);

From 0265d6e8ddb8901c2c03d09f9444f382a60ba6b1 Mon Sep 17 00:00:00 2001
From: Juergen Gross
Date: Fri, 17 Jan 2020 15:39:55 +0100
Subject: [PATCH 04/23] xen/blkfront: limit allocated memory size to actual
 use case
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Today the Xen blkfront driver allocates memory for one struct
blkfront_ring_info for each communication ring. This structure is
statically sized for the maximum supported configuration, resulting in
a size of more than 90 kB. As the main size contributor is one array
inside the struct, the memory allocation can easily be limited by
moving this array to be the last structure element and allocating only
the memory for the actually needed array size.

Acked-by: Roger Pau Monné
Signed-off-by: Juergen Gross
Signed-off-by: Jens Axboe
---
 drivers/block/xen-blkfront.c | 8 +++-----
 1 file changed, 3 insertions(+), 5 deletions(-)

diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index c02be06c5299..61491167da19 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -151,9 +151,6 @@ MODULE_PARM_DESC(max_ring_page_order, "Maximum order of pages to be used for the
 #define BLK_RING_SIZE(info)	\
 	__CONST_RING_SIZE(blkif, XEN_PAGE_SIZE * (info)->nr_ring_pages)
 
-#define BLK_MAX_RING_SIZE	\
-	__CONST_RING_SIZE(blkif, XEN_PAGE_SIZE * XENBUS_MAX_RING_GRANTS)
-
 /*
  * ring-ref%u i=(-1UL) would take 11 characters + 'ring-ref' is 8, so 19
  * characters are enough. Define to 20 to keep consistent with backend.
@@ -177,12 +174,12 @@ struct blkfront_ring_info {
 	unsigned int evtchn, irq;
 	struct work_struct work;
 	struct gnttab_free_callback callback;
-	struct blk_shadow shadow[BLK_MAX_RING_SIZE];
 	struct list_head indirect_pages;
 	struct list_head grants;
 	unsigned int persistent_gnts_c;
 	unsigned long shadow_free;
 	struct blkfront_info *dev_info;
+	struct blk_shadow shadow[];
 };
 
 /*
@@ -1915,7 +1912,8 @@ static int negotiate_mq(struct blkfront_info *info)
 		info->nr_rings = 1;
 
 	info->rinfo = kvcalloc(info->nr_rings,
-			       sizeof(struct blkfront_ring_info),
+			       struct_size(info->rinfo, shadow,
+					   BLK_RING_SIZE(info)),
 			       GFP_KERNEL);
 	if (!info->rinfo) {
 		xenbus_dev_fatal(info->xbdev, -ENOMEM, "allocating ring_info structure");

From 5bebf7486d4f4940b2a8e4009beb1dff5041853d Mon Sep 17 00:00:00 2001
From: Coly Li
Date: Sat, 1 Feb 2020 22:42:31 +0800
Subject: [PATCH 05/23] bcache: fix memory corruption in
 bch_cache_accounting_clear()
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Commit 83ff9318c44ba ("bcache: not use hard coded memset size in
bch_cache_accounting_clear()") tries to make the code easier to
understand by removing the hard coded number with the following change,

	void bch_cache_accounting_clear(...)
	{
		memset(&acc->total.cache_hits,
			0,
-			sizeof(unsigned long) * 7);
+			sizeof(struct cache_stats));
	}

Unfortunately the change was wrong (which also tells us the original
code was not easy to understand correctly). The hard coded number 7 is
used because in struct cache_stats,

 15 struct cache_stats {
 16         struct kobject          kobj;
 17
 18         unsigned long cache_hits;
 19         unsigned long cache_misses;
 20         unsigned long cache_bypass_hits;
 21         unsigned long cache_bypass_misses;
 22         unsigned long cache_readaheads;
 23         unsigned long cache_miss_collisions;
 24         unsigned long sectors_bypassed;
 25
 26         unsigned int            rescale;
 27 };

only the members at lines 18-24 should be set to 0. It is wrong to
replace 'sizeof(unsigned long) * 7' with 'sizeof(struct cache_stats)':
the write now runs past the end of acc->total and corrupts the memory
objects behind it.

Сорокин Артем Сергеевич reports that a kernel panic can be triggered
with the following steps,
1. Create a new set:
   make-bcache -B /dev/nvme1n1 -C /dev/sda --wipe-bcache
2. Run in /sys/fs/bcache/:
   echo 1 > clear_stats && cat stats_five_minute/cache_bypass_hits

I can reproduce the panic and get the following dmesg with KASAN
enabled,

[22613.172742] ==================================================================
[22613.172862] BUG: KASAN: null-ptr-deref in sysfs_kf_seq_show+0x117/0x230
[22613.172864] Read of size 8 at addr 0000000000000000 by task cat/6753
[22613.172870] CPU: 1 PID: 6753 Comm: cat Not tainted 5.5.0-rc7-lp151.28.16-default+ #11
[22613.172872] Hardware name: VMware, Inc. VMware Virtual Platform/440BX Desktop Reference Platform, BIOS 6.00 07/29/2019
[22613.172873] Call Trace:
[22613.172964]  dump_stack+0x8b/0xbb
[22613.172968]  ? sysfs_kf_seq_show+0x117/0x230
[22613.172970]  ? sysfs_kf_seq_show+0x117/0x230
[22613.173031]  __kasan_report+0x176/0x192
[22613.173064]  ? pr_cont_kernfs_name+0x40/0x60
[22613.173067]  ? sysfs_kf_seq_show+0x117/0x230
[22613.173070]  kasan_report+0xe/0x20
[22613.173072]  sysfs_kf_seq_show+0x117/0x230
[22613.173105]  seq_read+0x199/0x6d0
[22613.173110]  vfs_read+0xa5/0x1a0
[22613.173113]  ksys_read+0x110/0x160
[22613.173115]  ?
kernel_write+0xb0/0xb0 [22613.173177] do_syscall_64+0x77/0x290 [22613.173238] entry_SYSCALL_64_after_hwframe+0x44/0xa9 [22613.173241] RIP: 0033:0x7fc2c886ac61 [22613.173244] Code: fe ff ff 48 8d 3d c7 a0 09 00 48 83 ec 08 e8 46 03 02 00 66 0f 1f 44 00 00 8b 05 ca fb 2c 00 48 63 ff 85 c0 75 13 31 c0 0f 05 <48> 3d 00 f0 ff ff 77 57 f3 c3 0f 1f 44 00 00 55 53 48 89 d5 48 89 [22613.173245] RSP: 002b:00007ffebe776d68 EFLAGS: 00000246 ORIG_RAX: 0000000000000000 [22613.173248] RAX: ffffffffffffffda RBX: 0000000000020000 RCX: 00007fc2c886ac61 [22613.173249] RDX: 0000000000020000 RSI: 00007fc2c8cca000 RDI: 0000000000000003 [22613.173250] RBP: 0000000000020000 R08: ffffffffffffffff R09: 0000000000000000 [22613.173251] R10: 000000000000038c R11: 0000000000000246 R12: 00007fc2c8cca000 [22613.173253] R13: 0000000000000003 R14: 00007fc2c8cca00f R15: 0000000000020000 [22613.173255] ================================================================== [22613.173256] Disabling lock debugging due to kernel taint [22613.173350] BUG: kernel NULL pointer dereference, address: 0000000000000000 [22613.178380] #PF: supervisor read access in kernel mode [22613.180959] #PF: error_code(0x0000) - not-present page [22613.183444] PGD 0 P4D 0 [22613.184867] Oops: 0000 [#1] SMP KASAN PTI [22613.186797] CPU: 1 PID: 6753 Comm: cat Tainted: G B 5.5.0-rc7-lp151.28.16-default+ #11 [22613.191253] Hardware name: VMware, Inc. VMware Virtual Platform/440BX Desktop Reference Platform, BIOS 6.00 07/29/2019 [22613.196706] RIP: 0010:sysfs_kf_seq_show+0x117/0x230 [22613.199097] Code: ff 48 8b 0b 48 8b 44 24 08 48 01 e9 eb a6 31 f6 48 89 cf ba 00 10 00 00 48 89 4c 24 10 e8 b1 e6 e9 ff 4c 89 ff e8 19 07 ea ff <49> 8b 07 48 85 c0 48 89 44 24 08 0f 84 91 00 00 00 49 8b 6d 00 48 [22613.208016] RSP: 0018:ffff8881d4f8fd78 EFLAGS: 00010246 [22613.210448] RAX: 0000000000000000 RBX: ffff8881eb99b180 RCX: ffffffff810d9ef6 [22613.213691] RDX: 0000000000000001 RSI: 0000000000000246 RDI: 0000000000000246 [22613.216893] RBP: 0000000000001000 R08: fffffbfff072ddcd R09: fffffbfff072ddcd [22613.220075] R10: 0000000000000001 R11: fffffbfff072ddcc R12: ffff8881de5c0200 [22613.223256] R13: ffff8881ed175500 R14: ffff8881eb99b198 R15: 0000000000000000 [22613.226290] FS: 00007fc2c8d3d500(0000) GS:ffff8881f2a80000(0000) knlGS:0000000000000000 [22613.229637] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 [22613.231993] CR2: 0000000000000000 CR3: 00000001ec89a004 CR4: 00000000003606e0 [22613.234909] Call Trace: [22613.235931] seq_read+0x199/0x6d0 [22613.237259] vfs_read+0xa5/0x1a0 [22613.239229] ksys_read+0x110/0x160 [22613.240590] ? 
kernel_write+0xb0/0xb0 [22613.242040] do_syscall_64+0x77/0x290 [22613.243625] entry_SYSCALL_64_after_hwframe+0x44/0xa9 [22613.245450] RIP: 0033:0x7fc2c886ac61 [22613.246706] Code: fe ff ff 48 8d 3d c7 a0 09 00 48 83 ec 08 e8 46 03 02 00 66 0f 1f 44 00 00 8b 05 ca fb 2c 00 48 63 ff 85 c0 75 13 31 c0 0f 05 <48> 3d 00 f0 ff ff 77 57 f3 c3 0f 1f 44 00 00 55 53 48 89 d5 48 89 [22613.253296] RSP: 002b:00007ffebe776d68 EFLAGS: 00000246 ORIG_RAX: 0000000000000000 [22613.255835] RAX: ffffffffffffffda RBX: 0000000000020000 RCX: 00007fc2c886ac61 [22613.258472] RDX: 0000000000020000 RSI: 00007fc2c8cca000 RDI: 0000000000000003 [22613.260807] RBP: 0000000000020000 R08: ffffffffffffffff R09: 0000000000000000 [22613.263188] R10: 000000000000038c R11: 0000000000000246 R12: 00007fc2c8cca000 [22613.265598] R13: 0000000000000003 R14: 00007fc2c8cca00f R15: 0000000000020000 [22613.268729] Modules linked in: scsi_transport_iscsi af_packet iscsi_ibft iscsi_boot_sysfs vmw_vsock_vmci_transport vsock fuse bnep kvm_intel kvm irqbypass crc32_pclmul crc32c_intel ghash_clmulni_intel snd_ens1371 snd_ac97_codec ac97_bus bcache snd_pcm btusb btrtl btbcm btintel crc64 aesni_intel glue_helper crypto_simd vmw_balloon cryptd bluetooth snd_timer snd_rawmidi snd joydev pcspkr e1000 rfkill vmw_vmci soundcore ecdh_generic ecc gameport i2c_piix4 mptctl ac button hid_generic usbhid sr_mod cdrom ata_generic ehci_pci vmwgfx uhci_hcd drm_kms_helper syscopyarea serio_raw sysfillrect sysimgblt fb_sys_fops ttm ehci_hcd mptspi scsi_transport_spi mptscsih ata_piix mptbase ahci usbcore libahci drm sg dm_multipath dm_mod scsi_dh_rdac scsi_dh_emc scsi_dh_alua [22613.292429] CR2: 0000000000000000 [22613.293563] ---[ end trace a074b26a8508f378 ]--- [22613.295138] RIP: 0010:sysfs_kf_seq_show+0x117/0x230 [22613.296769] Code: ff 48 8b 0b 48 8b 44 24 08 48 01 e9 eb a6 31 f6 48 89 cf ba 00 10 00 00 48 89 4c 24 10 e8 b1 e6 e9 ff 4c 89 ff e8 19 07 ea ff <49> 8b 07 48 85 c0 48 89 44 24 08 0f 84 91 00 00 00 49 8b 6d 00 48 [22613.303553] RSP: 0018:ffff8881d4f8fd78 EFLAGS: 00010246 [22613.305280] RAX: 0000000000000000 RBX: ffff8881eb99b180 RCX: ffffffff810d9ef6 [22613.307924] RDX: 0000000000000001 RSI: 0000000000000246 RDI: 0000000000000246 [22613.310272] RBP: 0000000000001000 R08: fffffbfff072ddcd R09: fffffbfff072ddcd [22613.312685] R10: 0000000000000001 R11: fffffbfff072ddcc R12: ffff8881de5c0200 [22613.315076] R13: ffff8881ed175500 R14: ffff8881eb99b198 R15: 0000000000000000 [22613.318116] FS: 00007fc2c8d3d500(0000) GS:ffff8881f2a80000(0000) knlGS:0000000000000000 [22613.320743] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 [22613.322628] CR2: 0000000000000000 CR3: 00000001ec89a004 CR4: 00000000003606e0 Here this patch fixes the following problem by explicity set all the 7 members to 0 in bch_cache_accounting_clear(). 
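
For illustration only (not part of the patch), here is a minimal
user-space sketch of the overrun; struct cache_stats follows the layout
quoted above, and kobj_standin is a hypothetical stand-in for struct
kobject:

	#include <stddef.h>
	#include <stdio.h>

	struct kobj_standin { char opaque[64]; };  /* stand-in for struct kobject */

	struct cache_stats {
		struct kobj_standin kobj;
		unsigned long cache_hits;
		unsigned long cache_misses;
		unsigned long cache_bypass_hits;
		unsigned long cache_bypass_misses;
		unsigned long cache_readaheads;
		unsigned long cache_miss_collisions;
		unsigned long sectors_bypassed;
		unsigned int rescale;
	};

	int main(void)
	{
		/*
		 * The buggy clear starts at cache_hits but writes a whole
		 * struct's worth of bytes, so it runs past the end of the
		 * object by exactly the offset of cache_hits -- i.e. by
		 * the size of the embedded kobject (plus padding) --
		 * clobbering whatever lies behind acc->total in memory.
		 */
		printf("overrun: %zu bytes\n",
		       offsetof(struct cache_stats, cache_hits));
		return 0;
	}

In struct cache_accounting the cache_stats objects sit back to back, so
the bytes past acc->total land in the next object's embedded kobject,
which is exactly what the sysfs read in the reproducer then trips over.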
Reported-by: Сорокин Артем Сергеевич
Signed-off-by: Coly Li
Signed-off-by: Jens Axboe
---
 drivers/md/bcache/stats.c | 10 +++++++---
 1 file changed, 7 insertions(+), 3 deletions(-)

diff --git a/drivers/md/bcache/stats.c b/drivers/md/bcache/stats.c
index ba1c93791d8d..503aafe188dc 100644
--- a/drivers/md/bcache/stats.c
+++ b/drivers/md/bcache/stats.c
@@ -109,9 +109,13 @@ int bch_cache_accounting_add_kobjs(struct cache_accounting *acc,
 
 void bch_cache_accounting_clear(struct cache_accounting *acc)
 {
-	memset(&acc->total.cache_hits,
-	       0,
-	       sizeof(struct cache_stats));
+	acc->total.cache_hits = 0;
+	acc->total.cache_misses = 0;
+	acc->total.cache_bypass_hits = 0;
+	acc->total.cache_bypass_misses = 0;
+	acc->total.cache_readaheads = 0;
+	acc->total.cache_miss_collisions = 0;
+	acc->total.sectors_bypassed = 0;
 }
 
 void bch_cache_accounting_destroy(struct cache_accounting *acc)

From 7c02b0055f774ed9afb6e1c7724f33bf148ffdc0 Mon Sep 17 00:00:00 2001
From: Coly Li
Date: Sat, 1 Feb 2020 22:42:32 +0800
Subject: [PATCH 06/23] bcache: explicitly type cast in bset_bkey_last()

In bset.h, macro bset_bkey_last() is defined as,
    bkey_idx((struct bkey *) (i)->d, (i)->keys)

Parameter i can be of different data structure types; the macro works
as long as the type of i has the members 'd' and 'keys'.
bset_bkey_last() is also used in macro csum_set() to calculate the
checksum of an on-disk data structure. When csum_set() is used to
calculate the checksum of the on-disk bcache super block, the
parameter 'i' has type struct cache_sb_disk. Inside struct
cache_sb_disk (as in struct cache_sb) the member keys is of __u16
type, but bkey_idx() expects an unsigned int (32 bits wide), so there
is a type mismatch when passing the parameter to bkey_idx().

The sparse tool from the Intel 0day kbuild system reports this
incompatibility.

bkey_idx() is part of the user space API, so the simplest fix is to
cast (i)->keys to unsigned int in the macro bset_bkey_last().

Reported-by: kbuild test robot
Signed-off-by: Coly Li
Signed-off-by: Jens Axboe
---
 drivers/md/bcache/bset.h | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/drivers/md/bcache/bset.h b/drivers/md/bcache/bset.h
index c71365e7c1fa..a50dcfda656f 100644
--- a/drivers/md/bcache/bset.h
+++ b/drivers/md/bcache/bset.h
@@ -397,7 +397,8 @@ void bch_btree_keys_stats(struct btree_keys *b, struct bset_stats *state);
 
 /* Bkey utility code */
 
-#define bset_bkey_last(i)	bkey_idx((struct bkey *) (i)->d, (i)->keys)
+#define bset_bkey_last(i)	bkey_idx((struct bkey *) (i)->d, \
+					 (unsigned int)(i)->keys)
 
 static inline struct bkey *bset_bkey_idx(struct bset *i, unsigned int idx)
 {

From 038ba8cc1bffc51250add4a9b9249d4331576d8f Mon Sep 17 00:00:00 2001
From: Coly Li
Date: Sat, 1 Feb 2020 22:42:33 +0800
Subject: [PATCH 07/23] bcache: add readahead cache policy options via sysfs
 interface

Back in 2007 high performance SSDs were still expensive, so in order to
save more space for real workload or metadata, readahead I/Os for
non-metadata were bypassed and not cached on the SSD. Nowadays SSD
prices have dropped a lot and larger SSDs are available at comfortable
prices, so it is unnecessary to always bypass normal readahead I/Os to
save SSD space.

This patch adds options for readahead data cache policies via the
sysfs file /sys/block/bcache/readahead_cache_policy. The options are,
- "all": cache all readahead data I/Os.
- "meta-only": only cache metadata, and bypass other regular I/Os.
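
Condensed to a single condition (logically equivalent to the nested
form in the request.c hunk shown below), the new bypass decision reads:

	/*
	 * A readahead/background bio is bypassed only when it is not
	 * metadata and the user policy is not "all"; metadata readahead
	 * is always cached.
	 */
	if ((bio->bi_opf & (REQ_RAHEAD|REQ_BACKGROUND)) &&
	    !(bio->bi_opf & (REQ_META|REQ_PRIO)) &&
	    dc->cache_readahead_policy != BCH_CACHE_READA_ALL)
		goto skip;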
If users want bcache to continue to cache only readahead requests for
metadata and to bypass regular data readahead, they can write
"meta-only" to this sysfs file. By default, bcache now goes back to
caching all readahead requests.

Cc: stable@vger.kernel.org
Signed-off-by: Coly Li
Acked-by: Eric Wheeler
Cc: Michael Lyle
Signed-off-by: Jens Axboe
---
 drivers/md/bcache/bcache.h  |  3 +++
 drivers/md/bcache/request.c | 17 ++++++++++++-----
 drivers/md/bcache/sysfs.c   | 22 ++++++++++++++++++++++
 3 files changed, 37 insertions(+), 5 deletions(-)

diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
index adf26a21fcd1..74a9849ea164 100644
--- a/drivers/md/bcache/bcache.h
+++ b/drivers/md/bcache/bcache.h
@@ -330,6 +330,9 @@ struct cached_dev {
 	 */
 	atomic_t		has_dirty;
 
+#define BCH_CACHE_READA_ALL		0
+#define BCH_CACHE_READA_META_ONLY	1
+	unsigned int		cache_readahead_policy;
 	struct bch_ratelimit	writeback_rate;
 	struct delayed_work	writeback_rate_update;
 
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index 73478a91a342..820d8402a1dc 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -379,13 +379,20 @@ static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
 		goto skip;
 
 	/*
-	 * Flag for bypass if the IO is for read-ahead or background,
-	 * unless the read-ahead request is for metadata
+	 * Whether a read-ahead or background bio is bypassed depends on
+	 * the following:
+	 * - If the IO is for metadata, always cache it, no bypass.
+	 * - If the IO is not metadata, check dc->cache_readahead_policy:
+	 *      BCH_CACHE_READA_ALL: cache it, do not bypass
+	 *      BCH_CACHE_READA_META_ONLY: do not cache it, bypass
+	 * That is, read-ahead requests for metadata are always cached
 	 * (eg, for gfs2 or xfs).
*/ - if (bio->bi_opf & (REQ_RAHEAD|REQ_BACKGROUND) && - !(bio->bi_opf & (REQ_META|REQ_PRIO))) - goto skip; + if ((bio->bi_opf & (REQ_RAHEAD|REQ_BACKGROUND))) { + if (!(bio->bi_opf & (REQ_META|REQ_PRIO)) && + (dc->cache_readahead_policy != BCH_CACHE_READA_ALL)) + goto skip; + } if (bio->bi_iter.bi_sector & (c->sb.block_size - 1) || bio_sectors(bio) & (c->sb.block_size - 1)) { diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c index 733e2ddf3c78..3470fae4eabc 100644 --- a/drivers/md/bcache/sysfs.c +++ b/drivers/md/bcache/sysfs.c @@ -27,6 +27,12 @@ static const char * const bch_cache_modes[] = { NULL }; +static const char * const bch_reada_cache_policies[] = { + "all", + "meta-only", + NULL +}; + /* Default is 0 ("auto") */ static const char * const bch_stop_on_failure_modes[] = { "auto", @@ -100,6 +106,7 @@ rw_attribute(congested_write_threshold_us); rw_attribute(sequential_cutoff); rw_attribute(data_csum); rw_attribute(cache_mode); +rw_attribute(readahead_cache_policy); rw_attribute(stop_when_cache_set_failed); rw_attribute(writeback_metadata); rw_attribute(writeback_running); @@ -168,6 +175,11 @@ SHOW(__bch_cached_dev) bch_cache_modes, BDEV_CACHE_MODE(&dc->sb)); + if (attr == &sysfs_readahead_cache_policy) + return bch_snprint_string_list(buf, PAGE_SIZE, + bch_reada_cache_policies, + dc->cache_readahead_policy); + if (attr == &sysfs_stop_when_cache_set_failed) return bch_snprint_string_list(buf, PAGE_SIZE, bch_stop_on_failure_modes, @@ -353,6 +365,15 @@ STORE(__cached_dev) } } + if (attr == &sysfs_readahead_cache_policy) { + v = __sysfs_match_string(bch_reada_cache_policies, -1, buf); + if (v < 0) + return v; + + if ((unsigned int) v != dc->cache_readahead_policy) + dc->cache_readahead_policy = v; + } + if (attr == &sysfs_stop_when_cache_set_failed) { v = __sysfs_match_string(bch_stop_on_failure_modes, -1, buf); if (v < 0) @@ -467,6 +488,7 @@ static struct attribute *bch_cached_dev_files[] = { &sysfs_data_csum, #endif &sysfs_cache_mode, + &sysfs_readahead_cache_policy, &sysfs_stop_when_cache_set_failed, &sysfs_writeback_metadata, &sysfs_writeback_running, From d1c3cc34f5a78b38d2b809b289d912c3560545df Mon Sep 17 00:00:00 2001 From: Coly Li Date: Sat, 1 Feb 2020 22:42:34 +0800 Subject: [PATCH 08/23] bcache: fix incorrect data type usage in btree_flush_write() Dan Carpenter points out that from commit 2aa8c529387c ("bcache: avoid unnecessary btree nodes flushing in btree_flush_write()"), there is a incorrect data type usage which leads to the following static checker warning: drivers/md/bcache/journal.c:444 btree_flush_write() warn: 'ref_nr' unsigned <= 0 drivers/md/bcache/journal.c 422 static void btree_flush_write(struct cache_set *c) 423 { 424 struct btree *b, *t, *btree_nodes[BTREE_FLUSH_NR]; 425 unsigned int i, nr, ref_nr; ^^^^^^ 426 atomic_t *fifo_front_p, *now_fifo_front_p; 427 size_t mask; 428 429 if (c->journal.btree_flushing) 430 return; 431 432 spin_lock(&c->journal.flush_write_lock); 433 if (c->journal.btree_flushing) { 434 spin_unlock(&c->journal.flush_write_lock); 435 return; 436 } 437 c->journal.btree_flushing = true; 438 spin_unlock(&c->journal.flush_write_lock); 439 440 /* get the oldest journal entry and check its refcount */ 441 spin_lock(&c->journal.lock); 442 fifo_front_p = &fifo_front(&c->journal.pin); 443 ref_nr = atomic_read(fifo_front_p); 444 if (ref_nr <= 0) { ^^^^^^^^^^^ Unsigned can't be less than zero. 
 445                         /*
 446                          * do nothing if no btree node references
 447                          * the oldest journal entry
 448                          */
 449                         spin_unlock(&c->journal.lock);
 450                         goto out;
 451                 }
 452                 spin_unlock(&c->journal.lock);

As the warning indicates, declaring the local variable ref_nr as
unsigned int is wrong: it does not match atomic_read() or the "<= 0"
check. This patch fixes the error by defining the local variable
ref_nr as int.

Fixes: 2aa8c529387c ("bcache: avoid unnecessary btree nodes flushing in btree_flush_write()")
Reported-by: Dan Carpenter
Signed-off-by: Coly Li
Signed-off-by: Jens Axboe
---
 drivers/md/bcache/journal.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
index 33ddc5269e8d..6730820780b0 100644
--- a/drivers/md/bcache/journal.c
+++ b/drivers/md/bcache/journal.c
@@ -422,7 +422,8 @@ err:
 static void btree_flush_write(struct cache_set *c)
 {
 	struct btree *b, *t, *btree_nodes[BTREE_FLUSH_NR];
-	unsigned int i, nr, ref_nr;
+	unsigned int i, nr;
+	int ref_nr;
 	atomic_t *fifo_front_p, *now_fifo_front_p;
 	size_t mask;
 

From 49d08d596e85f39ded48e85df362827cbab1f5ae Mon Sep 17 00:00:00 2001
From: Coly Li
Date: Sat, 1 Feb 2020 22:42:35 +0800
Subject: [PATCH 09/23] bcache: check return value of prio_read()

Now if prio_read() fails while starting a cache set, we print an error
message in run_cache_set() and handle the failure properly.

Signed-off-by: Coly Li
Signed-off-by: Jens Axboe
---
 drivers/md/bcache/super.c | 21 ++++++++++++++++-----
 1 file changed, 16 insertions(+), 5 deletions(-)

diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index 3dea1d5acd5c..2749daf09724 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -609,12 +609,13 @@ int bch_prio_write(struct cache *ca, bool wait)
 	return 0;
 }
 
-static void prio_read(struct cache *ca, uint64_t bucket)
+static int prio_read(struct cache *ca, uint64_t bucket)
 {
 	struct prio_set *p = ca->disk_buckets;
 	struct bucket_disk *d = p->data + prios_per_bucket(ca), *end = d;
 	struct bucket *b;
 	unsigned int bucket_nr = 0;
+	int ret = -EIO;
 
 	for (b = ca->buckets;
 	     b < ca->buckets + ca->sb.nbuckets;
@@ -627,11 +628,15 @@ static void prio_read(struct cache *ca, uint64_t bucket)
 			prio_io(ca, bucket, REQ_OP_READ, 0);
 
 			if (p->csum !=
-			    bch_crc64(&p->magic, bucket_bytes(ca) - 8))
+			    bch_crc64(&p->magic, bucket_bytes(ca) - 8)) {
 				pr_warn("bad csum reading priorities");
+				goto out;
+			}
 
-			if (p->magic != pset_magic(&ca->sb))
+			if (p->magic != pset_magic(&ca->sb)) {
 				pr_warn("bad magic reading priorities");
+				goto out;
+			}
 
 			bucket = p->next_bucket;
 			d = p->data;
@@ -640,6 +645,10 @@ static void prio_read(struct cache *ca, uint64_t bucket)
 		b->prio = le16_to_cpu(d->prio);
 		b->gen = b->last_gc = d->gen;
 	}
+
+	ret = 0;
+out:
+	return ret;
 }
 
 /* Bcache device */
@@ -1873,8 +1882,10 @@ static int run_cache_set(struct cache_set *c)
 		j = &list_entry(journal.prev, struct journal_replay, list)->j;
 
 		err = "IO error reading priorities";
-		for_each_cache(ca, c, i)
-			prio_read(ca, j->prio_bucket[ca->sb.nr_this_dev]);
+		for_each_cache(ca, c, i) {
+			if (prio_read(ca, j->prio_bucket[ca->sb.nr_this_dev]))
+				goto err;
+		}
 
 		/*
 		 * If prio_read() fails it'll call cache_set_error and we'll

From f718b093277df582fbf8775548a4f163e664d282 Mon Sep 17 00:00:00 2001
From: Paolo Valente
Date: Mon, 3 Feb 2020 11:40:54 +0100
Subject: [PATCH 10/23] block, bfq: do not plug I/O for bfq_queues with no
 proc refs

Commit 478de3380c1c ("block, bfq: deschedule empty bfq_queues not
referred by any process") fixed
commit 3726112ec731 ("block, bfq: re-schedule empty queues if they
deserve I/O plugging") by descheduling an empty bfq_queue when it
remains with no process reference. Yet this still left a case
uncovered: an empty bfq_queue with no process reference that remains
in service. This happens for an in-service sync bfq_queue that is
deemed to deserve I/O-dispatch plugging when it remains empty. Yet no
new requests will arrive for such a bfq_queue if no process sends
requests to it any longer. Even worse, the bfq_queue may happen to be
prematurely freed while still in service (because there may remain no
reference to it any longer).

This commit solves this problem by preventing I/O dispatch from being
plugged for the in-service bfq_queue, if the latter has no process
reference (the bfq_queue is then prevented from remaining in service).

Fixes: 3726112ec731 ("block, bfq: re-schedule empty queues if they deserve I/O plugging")
Tested-by: Oleksandr Natalenko
Reported-by: Patrick Dung
Tested-by: Patrick Dung
Signed-off-by: Paolo Valente
Signed-off-by: Jens Axboe
---
 block/bfq-iosched.c | 12 ++++++++++++
 1 file changed, 12 insertions(+)

diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
index 4686b68b48b4..55d4328e7c12 100644
--- a/block/bfq-iosched.c
+++ b/block/bfq-iosched.c
@@ -3443,6 +3443,10 @@ static void bfq_dispatch_remove(struct request_queue *q, struct request *rq)
 static bool idling_needed_for_service_guarantees(struct bfq_data *bfqd,
 						 struct bfq_queue *bfqq)
 {
+	/* No point in idling for bfqq if it won't get requests any longer */
+	if (unlikely(!bfqq_process_refs(bfqq)))
+		return false;
+
 	return (bfqq->wr_coeff > 1 &&
 		(bfqd->wr_busy_queues <
 		 bfq_tot_busy_queues(bfqd) ||
@@ -4076,6 +4080,10 @@ static bool idling_boosts_thr_without_issues(struct bfq_data *bfqd,
 		bfqq_sequential_and_IO_bound,
 		idling_boosts_thr;
 
+	/* No point in idling for bfqq if it won't get requests any longer */
+	if (unlikely(!bfqq_process_refs(bfqq)))
+		return false;
+
 	bfqq_sequential_and_IO_bound = !BFQQ_SEEKY(bfqq) &&
 		bfq_bfqq_IO_bound(bfqq) && bfq_bfqq_has_short_ttime(bfqq);
 
@@ -4169,6 +4177,10 @@ static bool bfq_better_to_idle(struct bfq_queue *bfqq)
 	struct bfq_data *bfqd = bfqq->bfqd;
 	bool idling_boosts_thr_with_no_issue, idling_needed_for_service_guar;
 
+	/* No point in idling for bfqq if it won't get requests any longer */
+	if (unlikely(!bfqq_process_refs(bfqq)))
+		return false;
+
 	if (unlikely(bfqd->strict_guarantees))
 		return true;
 

From 32c59e3a9a5a0b180dd015755d6d18ca31e55935 Mon Sep 17 00:00:00 2001
From: Paolo Valente
Date: Mon, 3 Feb 2020 11:40:55 +0100
Subject: [PATCH 11/23] block, bfq: do not insert oom queue into position tree

BFQ maintains an ordered list, implemented with an RB tree, of
head-request positions of non-empty bfq_queues. This position tree,
inherited from CFQ, is used to find bfq_queues that contain I/O close
to each other. BFQ merges these bfq_queues into a single shared queue,
if this boosts throughput on the device at hand.

There is however a special-purpose bfq_queue that does not participate
in queue merging, the oom bfq_queue. Yet, also this bfq_queue could be
wrongly added to the position tree. So bfqq_find_close() could return
the oom bfq_queue, which is a source of further troubles in an
out-of-memory situation. This commit prevents the oom bfq_queue from
being inserted into the position tree.
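
In code, the fix is a single early return in bfq_pos_tree_add_move();
the full hunk follows below:

	/* oom_bfqq is statically allocated and never merged, so it must
	 * never be indexed by head-request position; with this guard,
	 * bfqq_find_close() can no longer return it. */
	if (bfqq == &bfqd->oom_bfqq)
		return;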
Tested-by: Patrick Dung Tested-by: Oleksandr Natalenko Signed-off-by: Paolo Valente Signed-off-by: Jens Axboe --- block/bfq-iosched.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c index 55d4328e7c12..15dfb0844644 100644 --- a/block/bfq-iosched.c +++ b/block/bfq-iosched.c @@ -613,6 +613,10 @@ bfq_pos_tree_add_move(struct bfq_data *bfqd, struct bfq_queue *bfqq) bfqq->pos_root = NULL; } + /* oom_bfqq does not participate in queue merging */ + if (bfqq == &bfqd->oom_bfqq) + return; + /* * bfqq cannot be merged any longer (see comments in * bfq_setup_cooperator): no point in adding bfqq into the From ecedd3d7e19911ab8fe42f17b77c0a30fe7f4db3 Mon Sep 17 00:00:00 2001 From: Paolo Valente Date: Mon, 3 Feb 2020 11:40:56 +0100 Subject: [PATCH 12/23] block, bfq: get extra ref to prevent a queue from being freed during a group move In bfq_bfqq_move(), the bfq_queue, say Q, to be moved to a new group may happen to be deactivated in the scheduling data structures of the source group (and then activated in the destination group). If Q is referred only by the data structures in the source group when the deactivation happens, then Q is freed upon the deactivation. This commit addresses this issue by getting an extra reference before the possible deactivation, and releasing this extra reference after Q has been moved. Tested-by: Chris Evich Tested-by: Oleksandr Natalenko Signed-off-by: Paolo Valente Signed-off-by: Jens Axboe --- block/bfq-cgroup.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/block/bfq-cgroup.c b/block/bfq-cgroup.c index e1419edde2ec..8ab7f18ff8cb 100644 --- a/block/bfq-cgroup.c +++ b/block/bfq-cgroup.c @@ -651,6 +651,12 @@ void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq, bfq_bfqq_expire(bfqd, bfqd->in_service_queue, false, BFQQE_PREEMPTED); + /* + * get extra reference to prevent bfqq from being freed in + * next possible deactivate + */ + bfqq->ref++; + if (bfq_bfqq_busy(bfqq)) bfq_deactivate_bfqq(bfqd, bfqq, false, false); else if (entity->on_st) @@ -670,6 +676,8 @@ void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq, if (!bfqd->in_service_queue && !bfqd->rq_in_driver) bfq_schedule_dispatch(bfqd); + /* release extra ref taken above */ + bfq_put_queue(bfqq); } /** From 33a16a9804688b2f4c4281ec31bc393ef2645ae4 Mon Sep 17 00:00:00 2001 From: Paolo Valente Date: Mon, 3 Feb 2020 11:40:57 +0100 Subject: [PATCH 13/23] block, bfq: extend incomplete name of field on_st The flag on_st in the bfq_entity data structure is true if the entity is on a service tree or is in service. Yet the name of the field, confusingly, does not mention the second, very important case. Extend the name to mention the second case too. 
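
As a sketch of the invariant the new name is meant to capture
(entity_on_tree() is a hypothetical helper, not a function in the
tree; st and sd are the entity's service tree and scheduler data):

	/* Expected truth table for the renamed flag: */
	bool expected = entity_on_tree(st->active, entity) ||	/* hypothetical */
			entity_on_tree(st->idle, entity) ||	/* hypothetical */
			entity == sd->in_service_entity;
	/* entity->on_st_or_in_serv should equal 'expected' at all times. */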
Tested-by: Oleksandr Natalenko Signed-off-by: Paolo Valente Signed-off-by: Jens Axboe --- block/bfq-cgroup.c | 2 +- block/bfq-iosched.c | 2 +- block/bfq-iosched.h | 2 +- block/bfq-wf2q.c | 11 +++++++---- 4 files changed, 10 insertions(+), 7 deletions(-) diff --git a/block/bfq-cgroup.c b/block/bfq-cgroup.c index 8ab7f18ff8cb..c818c64766e5 100644 --- a/block/bfq-cgroup.c +++ b/block/bfq-cgroup.c @@ -659,7 +659,7 @@ void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq, if (bfq_bfqq_busy(bfqq)) bfq_deactivate_bfqq(bfqd, bfqq, false, false); - else if (entity->on_st) + else if (entity->on_st_or_in_serv) bfq_put_idle_entity(bfq_entity_service_tree(entity), entity); bfqg_and_blkg_put(bfqq_group(bfqq)); diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c index 15dfb0844644..28770ec7c06f 100644 --- a/block/bfq-iosched.c +++ b/block/bfq-iosched.c @@ -1059,7 +1059,7 @@ bfq_bfqq_resume_state(struct bfq_queue *bfqq, struct bfq_data *bfqd, static int bfqq_process_refs(struct bfq_queue *bfqq) { - return bfqq->ref - bfqq->allocated - bfqq->entity.on_st - + return bfqq->ref - bfqq->allocated - bfqq->entity.on_st_or_in_serv - (bfqq->weight_counter != NULL); } diff --git a/block/bfq-iosched.h b/block/bfq-iosched.h index 8526f20c53bc..f1cb89def7f8 100644 --- a/block/bfq-iosched.h +++ b/block/bfq-iosched.h @@ -150,7 +150,7 @@ struct bfq_entity { * Flag, true if the entity is on a tree (either the active or * the idle one of its service_tree) or is in service. */ - bool on_st; + bool on_st_or_in_serv; /* B-WF2Q+ start and finish timestamps [sectors/weight] */ u64 start, finish; diff --git a/block/bfq-wf2q.c b/block/bfq-wf2q.c index ffe9ce9faa89..26776bdbdf36 100644 --- a/block/bfq-wf2q.c +++ b/block/bfq-wf2q.c @@ -645,7 +645,7 @@ static void bfq_forget_entity(struct bfq_service_tree *st, { struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity); - entity->on_st = false; + entity->on_st_or_in_serv = false; st->wsum -= entity->weight; if (bfqq && !is_in_service) bfq_put_queue(bfqq); @@ -999,7 +999,7 @@ static void __bfq_activate_entity(struct bfq_entity *entity, */ bfq_get_entity(entity); - entity->on_st = true; + entity->on_st_or_in_serv = true; } #ifdef CONFIG_BFQ_GROUP_IOSCHED @@ -1165,7 +1165,10 @@ bool __bfq_deactivate_entity(struct bfq_entity *entity, bool ins_into_idle_tree) struct bfq_service_tree *st; bool is_in_service; - if (!entity->on_st) /* entity never activated, or already inactive */ + if (!entity->on_st_or_in_serv) /* + * entity never activated, or + * already inactive + */ return false; /* @@ -1620,7 +1623,7 @@ bool __bfq_bfqd_reset_in_service(struct bfq_data *bfqd) * service tree either, then release the service reference to * the queue it represents (taken with bfq_get_entity). */ - if (!in_serv_entity->on_st) { + if (!in_serv_entity->on_st_or_in_serv) { /* * If no process is referencing in_serv_bfqq any * longer, then the service reference may be the only From 4d8340d0d4d90e7ca367d18ec16c2fefa89a339c Mon Sep 17 00:00:00 2001 From: Paolo Valente Date: Mon, 3 Feb 2020 11:40:58 +0100 Subject: [PATCH 14/23] block, bfq: remove ifdefs from around gets/puts of bfq groups ifdefs around gets and puts of bfq groups reduce readability, remove them. 
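
The pattern that makes this possible, summarized from the hunks below:
when CONFIG_BFQ_GROUP_IOSCHED is disabled, the get/put helpers become
empty functions, so the callers compile unconditionally:

	/* no-op stubs for the !CONFIG_BFQ_GROUP_IOSCHED build */
	void bfqg_and_blkg_get(struct bfq_group *bfqg) {}

	void bfqg_and_blkg_put(struct bfq_group *bfqg) {}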
Tested-by: Oleksandr Natalenko Reported-by: Jens Axboe Signed-off-by: Paolo Valente Signed-off-by: Jens Axboe --- block/bfq-cgroup.c | 4 ++++ block/bfq-iosched.c | 6 +----- block/bfq-iosched.h | 1 + 3 files changed, 6 insertions(+), 5 deletions(-) diff --git a/block/bfq-cgroup.c b/block/bfq-cgroup.c index c818c64766e5..cae488b58049 100644 --- a/block/bfq-cgroup.c +++ b/block/bfq-cgroup.c @@ -1406,6 +1406,10 @@ struct bfq_group *bfqq_group(struct bfq_queue *bfqq) return bfqq->bfqd->root_group; } +void bfqg_and_blkg_get(struct bfq_group *bfqg) {} + +void bfqg_and_blkg_put(struct bfq_group *bfqg) {} + struct bfq_group *bfq_create_group_hierarchy(struct bfq_data *bfqd, int node) { struct bfq_group *bfqg; diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c index 28770ec7c06f..fff76c920968 100644 --- a/block/bfq-iosched.c +++ b/block/bfq-iosched.c @@ -4825,9 +4825,7 @@ void bfq_put_queue(struct bfq_queue *bfqq) { struct bfq_queue *item; struct hlist_node *n; -#ifdef CONFIG_BFQ_GROUP_IOSCHED struct bfq_group *bfqg = bfqq_group(bfqq); -#endif if (bfqq->bfqd) bfq_log_bfqq(bfqq->bfqd, bfqq, "put_queue: %p %d", @@ -4900,9 +4898,7 @@ void bfq_put_queue(struct bfq_queue *bfqq) bfqq->bfqd->last_completed_rq_bfqq = NULL; kmem_cache_free(bfq_pool, bfqq); -#ifdef CONFIG_BFQ_GROUP_IOSCHED bfqg_and_blkg_put(bfqg); -#endif } static void bfq_put_cooperator(struct bfq_queue *bfqq) @@ -6390,10 +6386,10 @@ static void bfq_exit_queue(struct elevator_queue *e) hrtimer_cancel(&bfqd->idle_slice_timer); -#ifdef CONFIG_BFQ_GROUP_IOSCHED /* release oom-queue reference to root group */ bfqg_and_blkg_put(bfqd->root_group); +#ifdef CONFIG_BFQ_GROUP_IOSCHED blkcg_deactivate_policy(bfqd->queue, &blkcg_policy_bfq); #else spin_lock_irq(&bfqd->lock); diff --git a/block/bfq-iosched.h b/block/bfq-iosched.h index f1cb89def7f8..2c7cec737b2a 100644 --- a/block/bfq-iosched.h +++ b/block/bfq-iosched.h @@ -921,6 +921,7 @@ struct bfq_group { #else struct bfq_group { + struct bfq_entity entity; struct bfq_sched_data sched_data; struct bfq_queue *async_bfqq[2][IOPRIO_BE_NR]; From db37a34c563bf4692b36990ae89005c031385e52 Mon Sep 17 00:00:00 2001 From: Paolo Valente Date: Mon, 3 Feb 2020 11:40:59 +0100 Subject: [PATCH 15/23] block, bfq: get a ref to a group when adding it to a service tree BFQ schedules generic entities, which may represent either bfq_queues or groups of bfq_queues. When an entity is inserted into a service tree, a reference must be taken, to make sure that the entity does not disappear while still referred in the tree. Unfortunately, such a reference is mistakenly taken only if the entity represents a bfq_queue. This commit takes a reference also in case the entity represents a group. 
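
Condensed from the bfq_get_entity() hunk below, the reference is now
taken on either side:

	/* An entity not backed by a bfq_queue represents a group, so the
	 * reference is taken on the owning bfq_group via container_of(). */
	struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);

	if (bfqq)
		bfqq->ref++;
	else
		bfqg_and_blkg_get(container_of(entity, struct bfq_group,
					       entity));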
Tested-by: Oleksandr Natalenko Tested-by: Chris Evich Signed-off-by: Paolo Valente Signed-off-by: Jens Axboe --- block/bfq-cgroup.c | 2 +- block/bfq-iosched.h | 1 + block/bfq-wf2q.c | 12 ++++++++++-- 3 files changed, 12 insertions(+), 3 deletions(-) diff --git a/block/bfq-cgroup.c b/block/bfq-cgroup.c index cae488b58049..09b69a3ed490 100644 --- a/block/bfq-cgroup.c +++ b/block/bfq-cgroup.c @@ -332,7 +332,7 @@ static void bfqg_put(struct bfq_group *bfqg) kfree(bfqg); } -static void bfqg_and_blkg_get(struct bfq_group *bfqg) +void bfqg_and_blkg_get(struct bfq_group *bfqg) { /* see comments in bfq_bic_update_cgroup for why refcounting bfqg */ bfqg_get(bfqg); diff --git a/block/bfq-iosched.h b/block/bfq-iosched.h index 2c7cec737b2a..d1233af9c684 100644 --- a/block/bfq-iosched.h +++ b/block/bfq-iosched.h @@ -985,6 +985,7 @@ struct bfq_group *bfq_find_set_group(struct bfq_data *bfqd, struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg); struct bfq_group *bfqq_group(struct bfq_queue *bfqq); struct bfq_group *bfq_create_group_hierarchy(struct bfq_data *bfqd, int node); +void bfqg_and_blkg_get(struct bfq_group *bfqg); void bfqg_and_blkg_put(struct bfq_group *bfqg); #ifdef CONFIG_BFQ_GROUP_IOSCHED diff --git a/block/bfq-wf2q.c b/block/bfq-wf2q.c index 26776bdbdf36..eb0e2a6daabe 100644 --- a/block/bfq-wf2q.c +++ b/block/bfq-wf2q.c @@ -533,7 +533,9 @@ static void bfq_get_entity(struct bfq_entity *entity) bfqq->ref++; bfq_log_bfqq(bfqq->bfqd, bfqq, "get_entity: %p %d", bfqq, bfqq->ref); - } + } else + bfqg_and_blkg_get(container_of(entity, struct bfq_group, + entity)); } /** @@ -647,8 +649,14 @@ static void bfq_forget_entity(struct bfq_service_tree *st, entity->on_st_or_in_serv = false; st->wsum -= entity->weight; - if (bfqq && !is_in_service) + if (is_in_service) + return; + + if (bfqq) bfq_put_queue(bfqq); + else + bfqg_and_blkg_put(container_of(entity, struct bfq_group, + entity)); } /** From c92bddee77353a773f0df76115c1a01877ce1eae Mon Sep 17 00:00:00 2001 From: Paolo Valente Date: Mon, 3 Feb 2020 11:41:00 +0100 Subject: [PATCH 16/23] block, bfq: clarify the goal of bfq_split_bfqq() The exact, general goal of the function bfq_split_bfqq() is not that apparent. Add a comment to make it clear. Tested-by: Oleksandr Natalenko Signed-off-by: Paolo Valente Signed-off-by: Jens Axboe --- block/bfq-iosched.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c index fff76c920968..8c436abfaf14 100644 --- a/block/bfq-iosched.c +++ b/block/bfq-iosched.c @@ -5979,6 +5979,8 @@ static void bfq_finish_requeue_request(struct request *rq) } /* + * Removes the association between the current task and bfqq, assuming + * that bic points to the bfq iocontext of the task. * Returns NULL if a new bfqq should be allocated, or the old bfqq if this * was the last process referring to that bfqq. */ From 4ac76436a6d07dec1c3c766f234aa787a16e8f65 Mon Sep 17 00:00:00 2001 From: Amol Grover Date: Sat, 11 Jan 2020 13:08:16 +0530 Subject: [PATCH 17/23] nvmet: Pass lockdep expression to RCU lists ctrl->subsys->namespaces and subsys->namespaces are traversed with list_for_each_entry_rcu outside an RCU read-side critical section but under the protection of ctrl->subsys->lock and subsys->lock respectively. Hence, add the corresponding lockdep expression to the list traversal primitive to silence false-positive lockdep warnings, and harden RCU lists. 
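
The general shape of the annotation, for reference (pos, head, member
and lock are placeholders, not names from the patch):

	/* The optional fourth argument of list_for_each_entry_rcu() names
	 * the lock that makes a lockless-looking traversal safe, so
	 * lockdep stays quiet when the walk happens under that lock
	 * rather than under rcu_read_lock(). */
	list_for_each_entry_rcu(pos, &head, member,
				lockdep_is_held(&lock)) {
		/* ... use pos ... */
	}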
Reported-by: kbuild test robot Reviewed-by: Joel Fernandes (Google) Signed-off-by: Amol Grover Signed-off-by: Keith Busch --- drivers/nvme/target/core.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c index 28438b833c1b..35810a0a8d21 100644 --- a/drivers/nvme/target/core.c +++ b/drivers/nvme/target/core.c @@ -555,7 +555,8 @@ int nvmet_ns_enable(struct nvmet_ns *ns) } else { struct nvmet_ns *old; - list_for_each_entry_rcu(old, &subsys->namespaces, dev_link) { + list_for_each_entry_rcu(old, &subsys->namespaces, dev_link, + lockdep_is_held(&subsys->lock)) { BUG_ON(ns->nsid == old->nsid); if (ns->nsid < old->nsid) break; @@ -1172,7 +1173,8 @@ static void nvmet_setup_p2p_ns_map(struct nvmet_ctrl *ctrl, ctrl->p2p_client = get_device(req->p2p_client); - list_for_each_entry_rcu(ns, &ctrl->subsys->namespaces, dev_link) + list_for_each_entry_rcu(ns, &ctrl->subsys->namespaces, dev_link, + lockdep_is_held(&ctrl->subsys->lock)) nvmet_p2pmem_ns_add_p2p(ctrl, ns); } From b716e6889c95f64ba32af492461f6cc9341f3f05 Mon Sep 17 00:00:00 2001 From: Sagi Grimberg Date: Sun, 26 Jan 2020 23:23:28 -0800 Subject: [PATCH 18/23] nvmet: fix dsm failure when payload does not match sgl descriptor The host is allowed to pass the controller an sgl describing a buffer that is larger than the dsm payload itself, allow it when executing dsm. Reported-by: Dakshaja Uppalapati Reviewed-by: Christoph Hellwig , Reviewed-by: Max Gurtovoy Signed-off-by: Sagi Grimberg Signed-off-by: Keith Busch --- drivers/nvme/target/core.c | 11 +++++++++++ drivers/nvme/target/io-cmd-bdev.c | 2 +- drivers/nvme/target/io-cmd-file.c | 2 +- drivers/nvme/target/nvmet.h | 1 + 4 files changed, 14 insertions(+), 2 deletions(-) diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c index 35810a0a8d21..461987f669c5 100644 --- a/drivers/nvme/target/core.c +++ b/drivers/nvme/target/core.c @@ -939,6 +939,17 @@ bool nvmet_check_data_len(struct nvmet_req *req, size_t data_len) } EXPORT_SYMBOL_GPL(nvmet_check_data_len); +bool nvmet_check_data_len_lte(struct nvmet_req *req, size_t data_len) +{ + if (unlikely(data_len > req->transfer_len)) { + req->error_loc = offsetof(struct nvme_common_command, dptr); + nvmet_req_complete(req, NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR); + return false; + } + + return true; +} + int nvmet_req_alloc_sgl(struct nvmet_req *req) { struct pci_dev *p2p_dev = NULL; diff --git a/drivers/nvme/target/io-cmd-bdev.c b/drivers/nvme/target/io-cmd-bdev.c index b6fca0e421ef..ea0e596be15d 100644 --- a/drivers/nvme/target/io-cmd-bdev.c +++ b/drivers/nvme/target/io-cmd-bdev.c @@ -280,7 +280,7 @@ static void nvmet_bdev_execute_discard(struct nvmet_req *req) static void nvmet_bdev_execute_dsm(struct nvmet_req *req) { - if (!nvmet_check_data_len(req, nvmet_dsm_len(req))) + if (!nvmet_check_data_len_lte(req, nvmet_dsm_len(req))) return; switch (le32_to_cpu(req->cmd->dsm.attributes)) { diff --git a/drivers/nvme/target/io-cmd-file.c b/drivers/nvme/target/io-cmd-file.c index caebfce06605..cd5670b83118 100644 --- a/drivers/nvme/target/io-cmd-file.c +++ b/drivers/nvme/target/io-cmd-file.c @@ -336,7 +336,7 @@ static void nvmet_file_dsm_work(struct work_struct *w) static void nvmet_file_execute_dsm(struct nvmet_req *req) { - if (!nvmet_check_data_len(req, nvmet_dsm_len(req))) + if (!nvmet_check_data_len_lte(req, nvmet_dsm_len(req))) return; INIT_WORK(&req->f.work, nvmet_file_dsm_work); schedule_work(&req->f.work); diff --git a/drivers/nvme/target/nvmet.h 
b/drivers/nvme/target/nvmet.h index 46df45e837c9..eda28b22a2c8 100644 --- a/drivers/nvme/target/nvmet.h +++ b/drivers/nvme/target/nvmet.h @@ -374,6 +374,7 @@ bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq, struct nvmet_sq *sq, const struct nvmet_fabrics_ops *ops); void nvmet_req_uninit(struct nvmet_req *req); bool nvmet_check_data_len(struct nvmet_req *req, size_t data_len); +bool nvmet_check_data_len_lte(struct nvmet_req *req, size_t data_len); void nvmet_req_complete(struct nvmet_req *req, u16 status); int nvmet_req_alloc_sgl(struct nvmet_req *req); void nvmet_req_free_sgl(struct nvmet_req *req); From cfa27356f835dc7755192e7b941d4f4851acbcc7 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 30 Jan 2020 19:40:24 +0100 Subject: [PATCH 19/23] nvme-pci: remove nvmeq->tags There is no real need to have a pointer to the tagset in struct nvme_queue, as we only need it in a single place, and that place can derive the used tagset from the device and qid trivially. This fixes a problem with stale pointer exposure when tagsets are reset, and also shrinks the nvme_queue structure. It also matches what most other transports have done since day 1. Reported-by: Edmund Nadolski Signed-off-by: Christoph Hellwig Signed-off-by: Keith Busch --- drivers/nvme/host/pci.c | 23 ++++++++--------------- 1 file changed, 8 insertions(+), 15 deletions(-) diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index 365a2ddbeaa7..da392b50f73e 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -167,7 +167,6 @@ struct nvme_queue { /* only used for poll queues: */ spinlock_t cq_poll_lock ____cacheline_aligned_in_smp; volatile struct nvme_completion *cqes; - struct blk_mq_tags **tags; dma_addr_t sq_dma_addr; dma_addr_t cq_dma_addr; u32 __iomem *q_db; @@ -376,29 +375,17 @@ static int nvme_admin_init_hctx(struct blk_mq_hw_ctx *hctx, void *data, WARN_ON(hctx_idx != 0); WARN_ON(dev->admin_tagset.tags[0] != hctx->tags); - WARN_ON(nvmeq->tags); hctx->driver_data = nvmeq; - nvmeq->tags = &dev->admin_tagset.tags[0]; return 0; } -static void nvme_admin_exit_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx) -{ - struct nvme_queue *nvmeq = hctx->driver_data; - - nvmeq->tags = NULL; -} - static int nvme_init_hctx(struct blk_mq_hw_ctx *hctx, void *data, unsigned int hctx_idx) { struct nvme_dev *dev = data; struct nvme_queue *nvmeq = &dev->queues[hctx_idx + 1]; - if (!nvmeq->tags) - nvmeq->tags = &dev->tagset.tags[hctx_idx]; - WARN_ON(dev->tagset.tags[hctx_idx] != hctx->tags); hctx->driver_data = nvmeq; return 0; @@ -948,6 +935,13 @@ static inline void nvme_ring_cq_doorbell(struct nvme_queue *nvmeq) writel(head, nvmeq->q_db + nvmeq->dev->db_stride); } +static inline struct blk_mq_tags *nvme_queue_tagset(struct nvme_queue *nvmeq) +{ + if (!nvmeq->qid) + return nvmeq->dev->admin_tagset.tags[0]; + return nvmeq->dev->tagset.tags[nvmeq->qid - 1]; +} + static inline void nvme_handle_cqe(struct nvme_queue *nvmeq, u16 idx) { volatile struct nvme_completion *cqe = &nvmeq->cqes[idx]; @@ -972,7 +966,7 @@ static inline void nvme_handle_cqe(struct nvme_queue *nvmeq, u16 idx) return; } - req = blk_mq_tag_to_rq(*nvmeq->tags, cqe->command_id); + req = blk_mq_tag_to_rq(nvme_queue_tagset(nvmeq), cqe->command_id); trace_nvme_sq(req, cqe->sq_head, nvmeq->sq_tail); nvme_end_request(req, cqe->status, cqe->result); } @@ -1572,7 +1566,6 @@ static const struct blk_mq_ops nvme_mq_admin_ops = { .queue_rq = nvme_queue_rq, .complete = nvme_pci_complete_rq, .init_hctx = nvme_admin_init_hctx, - .exit_hctx = 
nvme_admin_exit_hctx,
 	.init_request	= nvme_init_request,
 	.timeout	= nvme_timeout,
 };

From c8ab422553c81a0eb070329c63725df1cd1425bc Mon Sep 17 00:00:00 2001
From: Zhiqiang Liu
Date: Tue, 4 Feb 2020 19:30:20 +0800
Subject: [PATCH 20/23] brd: check and limit the max_part parameter

In brd_init(), rd_nr brd_device instances are first allocated and
added to brd_devices; brd_devices is then traversed and each
brd_device is added by calling add_disk().

When allocating a brd_device, disk->first_minor is set to i * max_part.
If rd_nr * max_part is larger than MINORMASK, two different brd_device
instances may get the same devt, and only one of them can be
successfully added. Removing brd.ko then causes an oops in brd_exit().

Follow these steps:
  # modprobe brd rd_nr=3 rd_size=102400 max_part=1048576
  # rmmod brd
and the oops will appear.

Oops log:
[  726.613722] Call trace:
[  726.614175]  kernfs_find_ns+0x24/0x130
[  726.614852]  kernfs_find_and_get_ns+0x44/0x68
[  726.615749]  sysfs_remove_group+0x38/0xb0
[  726.616520]  blk_trace_remove_sysfs+0x1c/0x28
[  726.617320]  blk_unregister_queue+0x98/0x100
[  726.618105]  del_gendisk+0x144/0x2b8
[  726.618759]  brd_exit+0x68/0x560 [brd]
[  726.619501]  __arm64_sys_delete_module+0x19c/0x2a0
[  726.620384]  el0_svc_common+0x78/0x130
[  726.621057]  el0_svc_handler+0x38/0x78
[  726.621738]  el0_svc+0x8/0xc
[  726.622259] Code: aa0203f6 aa0103f7 aa1e03e0 d503201f (7940e260)

Here, we add brd_check_and_reset_par() to check and limit the max_part
parameter.

--
V5->V6:
 - remove useless code
V4->V5:(suggested by Ming Lei)
 - make sure max_part is not larger than DISK_MAX_PARTS
V3->V4:(suggested by Ming Lei)
 - remove useless change
 - add one limit of max_part
V2->V3: (suggested by Ming Lei)
 - clear .minors when running out of consecutive minor space in brd_alloc
 - remove limit of rd_nr
V1->V2:
 - add more checks in brd_check_par_valid as suggested by Ming Lei.

Signed-off-by: Zhiqiang Liu
Reviewed-by: Bob Liu
Reviewed-by: Ming Lei
Signed-off-by: Jens Axboe
---
 drivers/block/brd.c | 22 ++++++++++++++++++++--
 1 file changed, 20 insertions(+), 2 deletions(-)

diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index a8730cc4db10..220c5e18aba0 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -473,6 +473,25 @@ static struct kobject *brd_probe(dev_t dev, int *part, void *data)
 	return kobj;
 }
 
+static inline void brd_check_and_reset_par(void)
+{
+	if (unlikely(!max_part))
+		max_part = 1;
+
+	/*
+	 * make sure 'max_part' divides (1U << MINORBITS) exactly, otherwise
+	 * it is possible to get the same dev_t when adding partitions.
+	 */
+	if ((1U << MINORBITS) % max_part != 0)
+		max_part = 1UL << fls(max_part);
+
+	if (max_part > DISK_MAX_PARTS) {
+		pr_info("brd: max_part can't be larger than %d, reset max_part = %d.\n",
+			DISK_MAX_PARTS, DISK_MAX_PARTS);
+		max_part = DISK_MAX_PARTS;
+	}
+}
+
 static int __init brd_init(void)
 {
 	struct brd_device *brd, *next;
@@ -496,8 +515,7 @@ static int __init brd_init(void)
 	if (register_blkdev(RAMDISK_MAJOR, "ramdisk"))
 		return -EIO;
 
-	if (unlikely(!max_part))
-		max_part = 1;
+	brd_check_and_reset_par();
 
 	for (i = 0; i < rd_nr; i++) {
 		brd = brd_alloc(i);

From 0b87a2b795d66be7b54779848ef0f3901c5e46fc Mon Sep 17 00:00:00 2001
From: Israel Rukshin
Date: Tue, 4 Feb 2020 14:38:09 +0200
Subject: [PATCH 21/23] nvmet: Fix error print message at nvmet_install_queue
 function

Place the arguments in the correct order.
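
For clarity, the mismatch in one line (the actual one-line diff
follows below):

	/* The format string names qid, cntlid, ret in that order, but
	 * the pre-fix call passed qid, ret, cntlid, so the return code
	 * was printed as the cntlid and vice versa: */
	pr_err("failed to install queue %d cntlid %d ret %x\n",
		qid, ret, ctrl->cntlid);	/* wrong: ret and cntlid swapped */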
Fixes: 1672ddb8d691 ("nvmet: Add install_queue callout")
Signed-off-by: Israel Rukshin
Reviewed-by: Max Gurtovoy
Reviewed-by: Christoph Hellwig
Signed-off-by: Keith Busch
---
 drivers/nvme/target/fabrics-cmd.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/nvme/target/fabrics-cmd.c b/drivers/nvme/target/fabrics-cmd.c
index f7297473d9eb..45ebc2e20458 100644
--- a/drivers/nvme/target/fabrics-cmd.c
+++ b/drivers/nvme/target/fabrics-cmd.c
@@ -136,7 +136,7 @@ static u16 nvmet_install_queue(struct nvmet_ctrl *ctrl, struct nvmet_req *req)
 
 		if (ret) {
 			pr_err("failed to install queue %d cntlid %d ret %x\n",
-				qid, ret, ctrl->cntlid);
+				qid, ctrl->cntlid, ret);
 			return ret;
 		}
 	}

From 1a3f540d63152b8db0a12de508bfa03776217d83 Mon Sep 17 00:00:00 2001
From: Israel Rukshin
Date: Tue, 4 Feb 2020 14:38:10 +0200
Subject: [PATCH 22/23] nvmet: Fix controller use after free

After nvmet_install_queue() sets sq->ctrl, a call to nvmet_sq_destroy()
reduces the controller refcount. In case nvmet_install_queue() fails,
nvmet_ctrl_put() is called twice (in nvmet_sq_destroy() and in
nvmet_execute_io_connect()/nvmet_execute_admin_connect()) instead of
once for the queue, which leads to a use after free of the controller.
Fix this by setting sq->ctrl to NULL in case of a failure in
nvmet_install_queue().

The bug leads to the following Call Trace:
[65857.994862] refcount_t: underflow; use-after-free.
[65858.108304] Workqueue: events nvmet_rdma_release_queue_work [nvmet_rdma]
[65858.115557] RIP: 0010:refcount_warn_saturate+0xe5/0xf0
[65858.208141] Call Trace:
[65858.211203]  nvmet_sq_destroy+0xe1/0xf0 [nvmet]
[65858.216383]  nvmet_rdma_release_queue_work+0x37/0xf0 [nvmet_rdma]
[65858.223117]  process_one_work+0x167/0x370
[65858.227776]  worker_thread+0x49/0x3e0
[65858.232089]  kthread+0xf5/0x130
[65858.235895]  ? max_active_store+0x80/0x80
[65858.240504]  ?
kthread_bind+0x10/0x10
[65858.244832]  ret_from_fork+0x1f/0x30
[65858.249074] ---[ end trace f82d59250b54beb7 ]---

Fixes: bb1cc74790eb ("nvmet: implement valid sqhd values in completions")
Fixes: 1672ddb8d691 ("nvmet: Add install_queue callout")
Signed-off-by: Israel Rukshin
Reviewed-by: Max Gurtovoy
Reviewed-by: Christoph Hellwig
Signed-off-by: Keith Busch
---
 drivers/nvme/target/fabrics-cmd.c | 13 +++++++++----
 1 file changed, 9 insertions(+), 4 deletions(-)

diff --git a/drivers/nvme/target/fabrics-cmd.c b/drivers/nvme/target/fabrics-cmd.c
index 45ebc2e20458..feef15c38ec9 100644
--- a/drivers/nvme/target/fabrics-cmd.c
+++ b/drivers/nvme/target/fabrics-cmd.c
@@ -109,6 +109,7 @@ static u16 nvmet_install_queue(struct nvmet_ctrl *ctrl, struct nvmet_req *req)
 	u16 qid = le16_to_cpu(c->qid);
 	u16 sqsize = le16_to_cpu(c->sqsize);
 	struct nvmet_ctrl *old;
+	u16 ret;
 
 	old = cmpxchg(&req->sq->ctrl, NULL, ctrl);
 	if (old) {
@@ -119,7 +120,8 @@ static u16 nvmet_install_queue(struct nvmet_ctrl *ctrl, struct nvmet_req *req)
 	if (!sqsize) {
 		pr_warn("queue size zero!\n");
 		req->error_loc = offsetof(struct nvmf_connect_command, sqsize);
-		return NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
+		ret = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
+		goto err;
 	}
 
 	/* note: convert queue size from 0's-based value to 1's-based value */
@@ -132,16 +134,19 @@ static u16 nvmet_install_queue(struct nvmet_ctrl *ctrl, struct nvmet_req *req)
 	}
 
 	if (ctrl->ops->install_queue) {
-		u16 ret = ctrl->ops->install_queue(req->sq);
-
+		ret = ctrl->ops->install_queue(req->sq);
 		if (ret) {
 			pr_err("failed to install queue %d cntlid %d ret %x\n",
 				qid, ctrl->cntlid, ret);
-			return ret;
+			goto err;
 		}
 	}
 
 	return 0;
+
+err:
+	req->sq->ctrl = NULL;
+	return ret;
 }
 
 static void nvmet_execute_admin_connect(struct nvmet_req *req)

From 0f5be6a4ff7b3f8bf3db15f904e3e76797a43d9a Mon Sep 17 00:00:00 2001
From: Daniel Wagner
Date: Thu, 30 Jan 2020 10:29:34 -0800
Subject: [PATCH 23/23] nvmet: update AEN list and array at one place

All async events are enqueued via nvmet_add_async_event(), which
updates the ctrl->async_event_cmds[] array and additionally adds a
struct nvmet_async_event to the ctrl->async_events list. Under normal
operation, nvmet_async_event_work() updates ctrl->async_event_cmds[]
again and removes the corresponding struct nvmet_async_event from the
list. However, nvmet_sq_destroy() may be called instead, and it ends
up in nvmet_async_events_free(), which only updates the
ctrl->async_event_cmds[] array.

Add a new function nvmet_async_events_process() and rework
nvmet_async_events_free() so that processing async events updates both
the array and the list. When we destroy the submission queue, after
clearing the AENs present on the ctrl->async_events list, we also loop
over ctrl->async_event_cmds[] and complete any requests posted by the
host for which no AEN is on the list, by calling
nvmet_async_events_process() and nvmet_async_events_free().
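
Condensed from the nvmet_sq_destroy() hunk below, the resulting
admin-queue teardown is:

	/* First complete AERs that have a matching event on
	 * ctrl->async_events (with an error status), then fail any async
	 * event commands still left in ctrl->async_event_cmds[]. */
	if (ctrl && ctrl->sqs && ctrl->sqs[0] == sq) {
		nvmet_async_events_process(ctrl, NVME_SC_INTERNAL | NVME_SC_DNR);
		nvmet_async_events_free(ctrl);
	}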
Reviewed-by: Christoph Hellwig Signed-off-by: Daniel Wagner [chaitanya.kulkarni@wdc.com * Loop over and clear out outstanding requests * Update changelog ] Signed-off-by: Chaitanya Kulkarni Signed-off-by: Keith Busch --- drivers/nvme/target/core.c | 63 ++++++++++++++++++++++---------------- 1 file changed, 36 insertions(+), 27 deletions(-) diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c index 461987f669c5..576de773b4db 100644 --- a/drivers/nvme/target/core.c +++ b/drivers/nvme/target/core.c @@ -129,27 +129,8 @@ static u32 nvmet_async_event_result(struct nvmet_async_event *aen) return aen->event_type | (aen->event_info << 8) | (aen->log_page << 16); } -static void nvmet_async_events_free(struct nvmet_ctrl *ctrl) +static void nvmet_async_events_process(struct nvmet_ctrl *ctrl, u16 status) { - struct nvmet_req *req; - - while (1) { - mutex_lock(&ctrl->lock); - if (!ctrl->nr_async_event_cmds) { - mutex_unlock(&ctrl->lock); - return; - } - - req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds]; - mutex_unlock(&ctrl->lock); - nvmet_req_complete(req, NVME_SC_INTERNAL | NVME_SC_DNR); - } -} - -static void nvmet_async_event_work(struct work_struct *work) -{ - struct nvmet_ctrl *ctrl = - container_of(work, struct nvmet_ctrl, async_event_work); struct nvmet_async_event *aen; struct nvmet_req *req; @@ -159,20 +140,43 @@ static void nvmet_async_event_work(struct work_struct *work) struct nvmet_async_event, entry); if (!aen || !ctrl->nr_async_event_cmds) { mutex_unlock(&ctrl->lock); - return; + break; } req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds]; - nvmet_set_result(req, nvmet_async_event_result(aen)); + if (status == 0) + nvmet_set_result(req, nvmet_async_event_result(aen)); list_del(&aen->entry); kfree(aen); mutex_unlock(&ctrl->lock); - nvmet_req_complete(req, 0); + nvmet_req_complete(req, status); } } +static void nvmet_async_events_free(struct nvmet_ctrl *ctrl) +{ + struct nvmet_req *req; + + mutex_lock(&ctrl->lock); + while (ctrl->nr_async_event_cmds) { + req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds]; + mutex_unlock(&ctrl->lock); + nvmet_req_complete(req, NVME_SC_INTERNAL | NVME_SC_DNR); + mutex_lock(&ctrl->lock); + } + mutex_unlock(&ctrl->lock); +} + +static void nvmet_async_event_work(struct work_struct *work) +{ + struct nvmet_ctrl *ctrl = + container_of(work, struct nvmet_ctrl, async_event_work); + + nvmet_async_events_process(ctrl, 0); +} + void nvmet_add_async_event(struct nvmet_ctrl *ctrl, u8 event_type, u8 event_info, u8 log_page) { @@ -753,19 +757,24 @@ static void nvmet_confirm_sq(struct percpu_ref *ref) void nvmet_sq_destroy(struct nvmet_sq *sq) { + u16 status = NVME_SC_INTERNAL | NVME_SC_DNR; + struct nvmet_ctrl *ctrl = sq->ctrl; + /* * If this is the admin queue, complete all AERs so that our * queue doesn't have outstanding requests on it. */ - if (sq->ctrl && sq->ctrl->sqs && sq->ctrl->sqs[0] == sq) - nvmet_async_events_free(sq->ctrl); + if (ctrl && ctrl->sqs && ctrl->sqs[0] == sq) { + nvmet_async_events_process(ctrl, status); + nvmet_async_events_free(ctrl); + } percpu_ref_kill_and_confirm(&sq->ref, nvmet_confirm_sq); wait_for_completion(&sq->confirm_done); wait_for_completion(&sq->free_done); percpu_ref_exit(&sq->ref); - if (sq->ctrl) { - nvmet_ctrl_put(sq->ctrl); + if (ctrl) { + nvmet_ctrl_put(ctrl); sq->ctrl = NULL; /* allows reusing the queue later */ } }