Mirror of https://github.com/nxp-imx/linux-imx.git
RDMA/hns: Fix return value in hns_roce_map_mr_sg
[ Upstream commit 203b70fda6 ]

As described in the ib_map_mr_sg function comment, it returns the number
of sg elements that were mapped to the memory region. However,
hns_roce_map_mr_sg returns the number of pages required for mapping the
DMA area. Fix it.

Fixes: 9b2cf76c9f ("RDMA/hns: Optimize PBL buffer allocation process")
Signed-off-by: Zhengchao Shao <shaozhengchao@huawei.com>
Link: https://lore.kernel.org/r/20240411033851.2884771-1-shaozhengchao@huawei.com
Reviewed-by: Junxian Huang <huangjunxian6@hisilicon.com>
Signed-off-by: Leon Romanovsky <leon@kernel.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
This commit is contained in:
parent 6b4f693992
commit db415a39ff
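For context on the contract this change restores, here is a minimal sketch of a hypothetical ib_map_mr_sg() caller (not taken from this patch; the function name reg_one_mr, the SZ_4K page size, and the error handling are illustrative assumptions). It shows why a consumer relies on the return value being the number of mapped sg elements rather than a page count:

#include <linux/scatterlist.h>
#include <linux/sizes.h>
#include <rdma/ib_verbs.h>

/*
 * Hypothetical caller, for illustration only: ib_map_mr_sg() returns the
 * number of scatterlist entries that were mapped to the memory region, so
 * a typical consumer compares it against the sg_nents it passed in. If a
 * driver returned a page count instead (the bug fixed here), that
 * comparison would be unreliable.
 */
static int reg_one_mr(struct ib_mr *mr, struct scatterlist *sg, int sg_nents)
{
        int mapped;

        /* SZ_4K is only an example MR page size. */
        mapped = ib_map_mr_sg(mr, sg, sg_nents, NULL, SZ_4K);
        if (mapped < 0)
                return mapped;

        /* Fewer entries mapped than requested: treat it as a failure. */
        if (mapped < sg_nents)
                return -EINVAL;

        return 0;
}

With the fix below, hns_roce_map_mr_sg() returns sg_num, the value produced by ib_sg_to_pages(), so such callers see the number of mapped sg elements as the ib_map_mr_sg() comment documents.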
@@ -421,18 +421,18 @@ int hns_roce_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
 	struct ib_device *ibdev = &hr_dev->ib_dev;
 	struct hns_roce_mr *mr = to_hr_mr(ibmr);
 	struct hns_roce_mtr *mtr = &mr->pbl_mtr;
-	int ret = 0;
+	int ret, sg_num = 0;
 
 	mr->npages = 0;
 	mr->page_list = kvcalloc(mr->pbl_mtr.hem_cfg.buf_pg_count,
 				 sizeof(dma_addr_t), GFP_KERNEL);
 	if (!mr->page_list)
-		return ret;
+		return sg_num;
 
-	ret = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, hns_roce_set_page);
-	if (ret < 1) {
+	sg_num = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, hns_roce_set_page);
+	if (sg_num < 1) {
 		ibdev_err(ibdev, "failed to store sg pages %u %u, cnt = %d.\n",
-			  mr->npages, mr->pbl_mtr.hem_cfg.buf_pg_count, ret);
+			  mr->npages, mr->pbl_mtr.hem_cfg.buf_pg_count, sg_num);
 		goto err_page_list;
 	}
 
@@ -443,17 +443,16 @@ int hns_roce_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
 	ret = hns_roce_mtr_map(hr_dev, mtr, mr->page_list, mr->npages);
 	if (ret) {
 		ibdev_err(ibdev, "failed to map sg mtr, ret = %d.\n", ret);
-		ret = 0;
+		sg_num = 0;
 	} else {
 		mr->pbl_mtr.hem_cfg.buf_pg_shift = (u32)ilog2(ibmr->page_size);
-		ret = mr->npages;
 	}
 
 err_page_list:
 	kvfree(mr->page_list);
 	mr->page_list = NULL;
 
-	return ret;
+	return sg_num;
 }
 
 static void hns_roce_mw_free(struct hns_roce_dev *hr_dev,