mirror of
git://git.yoctoproject.org/linux-yocto.git
synced 2025-10-22 23:13:01 +02:00
crypto: hisilicon - re-enable address prefetch after device resuming
[ Upstream commit 0dcd21443d9308ed88909d35aa0490c3fc680a47 ]
When the device resumes from a suspended state, it will revert to its
initial state and requires re-enabling. Currently, the address prefetch
function is not re-enabled after device resuming. Move the address prefetch
enable to the initialization process. In this way, the address prefetch
can be enabled when the device resumes by calling the initialization
process.
Fixes: 607c191b37 ("crypto: hisilicon - support runtime PM for accelerator device")
Signed-off-by: Chenghai Huang <huangchenghai2@huawei.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: Sasha Levin <sashal@kernel.org>
This commit is contained in:
parent
94b09b0ac0
commit
c9035e9ba5
|
@ -689,6 +689,7 @@ static int hpre_set_user_domain_and_cache(struct hisi_qm *qm)
|
|||
|
||||
/* Config data buffer pasid needed by Kunpeng 920 */
|
||||
hpre_config_pasid(qm);
|
||||
hpre_open_sva_prefetch(qm);
|
||||
|
||||
hpre_enable_clock_gate(qm);
|
||||
|
||||
|
@ -1366,8 +1367,6 @@ static int hpre_pf_probe_init(struct hpre *hpre)
|
|||
if (ret)
|
||||
return ret;
|
||||
|
||||
hpre_open_sva_prefetch(qm);
|
||||
|
||||
hisi_qm_dev_err_init(qm);
|
||||
ret = hpre_show_last_regs_init(qm);
|
||||
if (ret)
|
||||
|
|
|
@ -4363,9 +4363,6 @@ static void qm_restart_prepare(struct hisi_qm *qm)
|
|||
{
|
||||
u32 value;
|
||||
|
||||
if (qm->err_ini->open_sva_prefetch)
|
||||
qm->err_ini->open_sva_prefetch(qm);
|
||||
|
||||
if (qm->ver >= QM_HW_V3)
|
||||
return;
|
||||
|
||||
|
|
|
@ -436,6 +436,45 @@ static void sec_set_endian(struct hisi_qm *qm)
|
|||
writel_relaxed(reg, qm->io_base + SEC_CONTROL_REG);
|
||||
}
|
||||
|
||||
static void sec_close_sva_prefetch(struct hisi_qm *qm)
|
||||
{
|
||||
u32 val;
|
||||
int ret;
|
||||
|
||||
if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps))
|
||||
return;
|
||||
|
||||
val = readl_relaxed(qm->io_base + SEC_PREFETCH_CFG);
|
||||
val |= SEC_PREFETCH_DISABLE;
|
||||
writel(val, qm->io_base + SEC_PREFETCH_CFG);
|
||||
|
||||
ret = readl_relaxed_poll_timeout(qm->io_base + SEC_SVA_TRANS,
|
||||
val, !(val & SEC_SVA_DISABLE_READY),
|
||||
SEC_DELAY_10_US, SEC_POLL_TIMEOUT_US);
|
||||
if (ret)
|
||||
pci_err(qm->pdev, "failed to close sva prefetch\n");
|
||||
}
|
||||
|
||||
static void sec_open_sva_prefetch(struct hisi_qm *qm)
|
||||
{
|
||||
u32 val;
|
||||
int ret;
|
||||
|
||||
if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps))
|
||||
return;
|
||||
|
||||
/* Enable prefetch */
|
||||
val = readl_relaxed(qm->io_base + SEC_PREFETCH_CFG);
|
||||
val &= SEC_PREFETCH_ENABLE;
|
||||
writel(val, qm->io_base + SEC_PREFETCH_CFG);
|
||||
|
||||
ret = readl_relaxed_poll_timeout(qm->io_base + SEC_PREFETCH_CFG,
|
||||
val, !(val & SEC_PREFETCH_DISABLE),
|
||||
SEC_DELAY_10_US, SEC_POLL_TIMEOUT_US);
|
||||
if (ret)
|
||||
pci_err(qm->pdev, "failed to open sva prefetch\n");
|
||||
}
|
||||
|
||||
static void sec_engine_sva_config(struct hisi_qm *qm)
|
||||
{
|
||||
u32 reg;
|
||||
|
@ -469,45 +508,7 @@ static void sec_engine_sva_config(struct hisi_qm *qm)
|
|||
writel_relaxed(reg, qm->io_base +
|
||||
SEC_INTERFACE_USER_CTRL1_REG);
|
||||
}
|
||||
}
|
||||
|
||||
static void sec_open_sva_prefetch(struct hisi_qm *qm)
|
||||
{
|
||||
u32 val;
|
||||
int ret;
|
||||
|
||||
if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps))
|
||||
return;
|
||||
|
||||
/* Enable prefetch */
|
||||
val = readl_relaxed(qm->io_base + SEC_PREFETCH_CFG);
|
||||
val &= SEC_PREFETCH_ENABLE;
|
||||
writel(val, qm->io_base + SEC_PREFETCH_CFG);
|
||||
|
||||
ret = readl_relaxed_poll_timeout(qm->io_base + SEC_PREFETCH_CFG,
|
||||
val, !(val & SEC_PREFETCH_DISABLE),
|
||||
SEC_DELAY_10_US, SEC_POLL_TIMEOUT_US);
|
||||
if (ret)
|
||||
pci_err(qm->pdev, "failed to open sva prefetch\n");
|
||||
}
|
||||
|
||||
static void sec_close_sva_prefetch(struct hisi_qm *qm)
|
||||
{
|
||||
u32 val;
|
||||
int ret;
|
||||
|
||||
if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps))
|
||||
return;
|
||||
|
||||
val = readl_relaxed(qm->io_base + SEC_PREFETCH_CFG);
|
||||
val |= SEC_PREFETCH_DISABLE;
|
||||
writel(val, qm->io_base + SEC_PREFETCH_CFG);
|
||||
|
||||
ret = readl_relaxed_poll_timeout(qm->io_base + SEC_SVA_TRANS,
|
||||
val, !(val & SEC_SVA_DISABLE_READY),
|
||||
SEC_DELAY_10_US, SEC_POLL_TIMEOUT_US);
|
||||
if (ret)
|
||||
pci_err(qm->pdev, "failed to close sva prefetch\n");
|
||||
sec_open_sva_prefetch(qm);
|
||||
}
|
||||
|
||||
static void sec_enable_clock_gate(struct hisi_qm *qm)
|
||||
|
@ -1090,7 +1091,6 @@ static int sec_pf_probe_init(struct sec_dev *sec)
|
|||
if (ret)
|
||||
return ret;
|
||||
|
||||
sec_open_sva_prefetch(qm);
|
||||
hisi_qm_dev_err_init(qm);
|
||||
sec_debug_regs_clear(qm);
|
||||
ret = sec_show_last_regs_init(qm);
|
||||
|
|
|
@ -577,6 +577,7 @@ static int hisi_zip_set_user_domain_and_cache(struct hisi_qm *qm)
|
|||
writel(AXUSER_BASE, base + HZIP_DATA_WUSER_32_63);
|
||||
writel(AXUSER_BASE, base + HZIP_SGL_RUSER_32_63);
|
||||
}
|
||||
hisi_zip_open_sva_prefetch(qm);
|
||||
|
||||
/* let's open all compression/decompression cores */
|
||||
dcomp_bm = qm->cap_tables.dev_cap_table[ZIP_DECOMP_ENABLE_BITMAP_IDX].cap_val;
|
||||
|
@ -588,6 +589,7 @@ static int hisi_zip_set_user_domain_and_cache(struct hisi_qm *qm)
|
|||
CQC_CACHE_WB_ENABLE | FIELD_PREP(SQC_CACHE_WB_THRD, 1) |
|
||||
FIELD_PREP(CQC_CACHE_WB_THRD, 1), base + QM_CACHE_CTL);
|
||||
|
||||
hisi_zip_set_high_perf(qm);
|
||||
hisi_zip_enable_clock_gate(qm);
|
||||
|
||||
return 0;
|
||||
|
@ -1172,9 +1174,6 @@ static int hisi_zip_pf_probe_init(struct hisi_zip *hisi_zip)
|
|||
if (ret)
|
||||
return ret;
|
||||
|
||||
hisi_zip_set_high_perf(qm);
|
||||
|
||||
hisi_zip_open_sva_prefetch(qm);
|
||||
hisi_qm_dev_err_init(qm);
|
||||
hisi_zip_debug_regs_clear(qm);
|
||||
|
||||
|
|
Loading…
Reference in New Issue
Block a user