Mirror of https://github.com/nxp-imx/linux-imx.git
staging: fsl_qbman: account for pre-initialized BARs in case of kexec
The QMan FQD and PFDR private memory regions, as well as the BMan FBPR
region, are configured through BAR registers that can be set only once
per SoC reset. In cases such as kexec, a second kernel might attempt to
initialize the QBMan block again without a full reset.
In this scenario, the second kernel validates that the pre-configured
private memory regions match the reserved memory regions provided in
the device tree.
We rely on the bootloader to reserve the memory regions and set the
addresses in the device tree, so all successive kernels use the same
regions.
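
For context, the reserved regions are matched by compatible string in the
device tree. The sketch below (not part of this patch) shows how a kernel
can look up such a bootloader-reserved region; the "fsl,qman-fqd"
compatible follows the upstream QBMan reserved-memory bindings, and the
function name is hypothetical:

	#include <linux/of.h>
	#include <linux/of_reserved_mem.h>

	/* Illustrative only: resolve the FQD reserved-memory region so
	 * that every kexec'd kernel sees the same physical range.
	 */
	static int example_get_fqd_region(phys_addr_t *addr, phys_addr_t *sz)
	{
		struct device_node *np;
		struct reserved_mem *rmem;

		np = of_find_compatible_node(NULL, NULL, "fsl,qman-fqd");
		if (!np)
			return -ENODEV;

		rmem = of_reserved_mem_lookup(np);
		of_node_put(np);
		if (!rmem)
			return -ENODEV;

		*addr = rmem->base;	/* same base across kexec kernels */
		*sz = rmem->size;
		return 0;
	}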
If the system was previously configured, make sure all FQs are reset and
all buffer pools are drained before use, and skip initializing the PFDRs.
Additionally, zero out the PFDR/FQD memory regions before use, as
indicated by the manual, and disable interrupts during portal recovery.
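
Condensed, the detection logic added to bm_set_memory()/qm_set_memory()
below follows this pattern (a self-contained sketch; the example_*
variables are placeholders standing in for the BAR register accessors,
not the driver's API):

	#include <linux/io.h>
	#include <linux/kernel.h>
	#include <linux/string.h>

	/* Placeholders standing in for bm_in()/__qm_in() and friends; a
	 * real driver reads/writes the FBPR/FQD/PFDR BAR registers.
	 */
	static u32 example_bar, example_bare;

	static int example_set_region(u64 ba, u32 size, bool *need_cleanup)
	{
		void *ptr;

		/* A non-zero BAR means a previous kernel (e.g. pre-kexec)
		 * already programmed this region; the write-once register
		 * cannot be changed without a SoC reset.
		 */
		if (example_bar) {
			/* Accept only the exact region from the device tree */
			if (example_bar != lower_32_bits(ba) ||
			    example_bare != upper_32_bits(ba))
				return -ENOMEM;
			*need_cleanup = true;	/* FQs/pools must be drained */
			return 0;
		}

		/* First initialization: zero the region, as the manual
		 * requires, before programming the BAR once.
		 */
		ptr = memremap(ba, size, MEMREMAP_WB);
		if (!ptr)
			return -ENOMEM;
		memset(ptr, 0, size);
		memunmap(ptr);

		example_bare = upper_32_bits(ba);
		example_bar = lower_32_bits(ba);
		return 0;
	}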
Series inspired by the following changes for the upstream QBMan driver:
https://patchwork.kernel.org/project/linux-arm-kernel/list/?series=154265&state=%2A&archive=both
In particular, this mirrors upstream commit 97777078d6
("soc/fsl/qbman: Rework QBMan private memory setup").
Signed-off-by: Camelia Groza <camelia.groza@nxp.com>
Signed-off-by: Vladimir Oltean <vladimir.oltean@nxp.com>
parent 33aaa2c8d9
commit a218c908c8
@@ -236,17 +236,40 @@ static void bm_set_pool(struct bman *bm, u8 pool, u32 swdet, u32 swdxt,
 	bm_out(POOL_HWDXT(pool), __generate_thresh(hwdxt, 1));
 }
 
-static void bm_set_memory(struct bman *bm, u64 ba, int prio, u32 size)
+static int bm_set_memory(struct bman *bm, u64 ba, int prio, u32 size,
+			 bool *need_cleanup)
 {
-	u32 exp = ilog2(size);
+	u32 bar, bare, exp = ilog2(size);
+
 	/* choke if size isn't within range */
 	DPA_ASSERT((size >= 4096) && (size <= 1073741824) &&
 		   is_power_of_2(size));
 	/* choke if '[e]ba' has lower-alignment than 'size' */
 	DPA_ASSERT(!(ba & (size - 1)));
+
+	/* Check to see if the BMan has already been initialized */
+	bar = bm_in(FBPR_BAR);
+	if (bar) {
+		/* Make sure the base address hasn't changed */
+		bare = bm_in(FBPR_BARE);
+		if (bare != upper_32_bits(ba) || bar != lower_32_bits(ba)) {
+			pr_err("Attempted to reinitialize BMan with different BAR, got 0x%llx read BARE=0x%x BAR=0x%x\n",
+			       ba, bare, bar);
+			return -ENOMEM;
+		}
+
+		pr_devel("BMan FBPR BAR previously initialized with BARE=0x%x BAR=0x%x\n",
+			 bare, bar);
+
+		*need_cleanup = true;
+		return 0;
+	}
+
 	bm_out(FBPR_BARE, upper_32_bits(ba));
 	bm_out(FBPR_BAR, lower_32_bits(ba));
 	bm_out(FBPR_AR, (prio ? 0x40000000 : 0) | (exp - 1));
+
+	return 0;
 }
 
 /*****************/
@@ -456,15 +479,20 @@ static int __bind_irq(void)
 	return 0;
 }
 
-int bman_init_ccsr(struct device_node *node)
+int bman_init_ccsr(struct device_node *node, bool *need_cleanup)
 {
 	int ret;
+
 	if (!bman_have_ccsr())
 		return 0;
 	if (node != bm_node)
 		return -EINVAL;
 
 	/* FBPR memory */
-	bm_set_memory(bm, fbpr_a, 0, fbpr_sz);
+	ret = bm_set_memory(bm, fbpr_a, 0, fbpr_sz, need_cleanup);
+	if (ret)
+		return ret;
+
 	pr_info("bman-fbpr addr %pad size 0x%zx\n", &fbpr_a, fbpr_sz);
+
 	ret = __bind_irq();
@@ -382,13 +382,14 @@ __init int bman_init(void)
 	LIST_HEAD(shared_pcfgs);
 	struct device_node *dn;
 	struct bm_portal_config *pcfg;
+	struct cpumask offline_cpus;
+	bool need_cleanup = false;
 	struct bman_portal *p;
 	int cpu, ret;
-	struct cpumask offline_cpus;
 
 	/* Initialise the Bman (CCSR) device */
 	for_each_compatible_node(dn, NULL, "fsl,bman") {
-		if (!bman_init_ccsr(dn))
+		if (!bman_init_ccsr(dn, &need_cleanup))
 			pr_info("Bman err interrupt handler present\n");
 		else
 			pr_err("Bman CCSR setup failed\n");
@@ -486,6 +487,22 @@ __init int bman_init(void)
 	if (!cpumask_empty(&slave_cpus))
 		for_each_cpu(cpu, &slave_cpus)
 			init_slave(cpu);
+
+	if (need_cleanup) {
+		int i;
+
+		pr_info("BMan wasn't reset prior to boot, emptying all %d buffer pools\n",
+			bman_pool_max);
+
+		for (i = 0; i < bman_pool_max; i++) {
+			ret = bman_shutdown_pool(i);
+			if (ret) {
+				pr_err("Failed to shut down buffer pool %d: %pe\n",
+				       i, ERR_PTR(ret));
+			}
+		}
+	}
+
 	pr_info("Bman portals initialised\n");
 	cpumask_andnot(&offline_cpus, cpu_possible_mask, cpu_online_mask);
 	for_each_cpu(cpu, &offline_cpus)
@@ -67,7 +67,7 @@ struct bm_portal_config {
 
 #ifdef CONFIG_FSL_BMAN_CONFIG
 /* Hooks from bman_driver.c to bman_config.c */
-int bman_init_ccsr(struct device_node *node);
+int bman_init_ccsr(struct device_node *node, bool *need_cleanup);
 #endif
 
 /* Hooks from bman_driver.c in to bman_high.c */
@@ -380,16 +380,61 @@ static void qm_get_version(struct qman *qm, u16 *id, u8 *major, u8 *minor,
 	*cfg = v2 & 0xff;
 }
 
-static void qm_set_memory(struct qman *qm, enum qm_memory memory, u64 ba,
-			int enable, int prio, int stash, u32 size)
+/* Configure the FQD/PFDR private memory areas in the corresponding BAR
+ * registers. The registers can be set only once per SoC reset.
+ *
+ * For cases such as kexec where the memory might have already been
+ * initialized, verify that the provided base address matches the
+ * configured one.
+ *
+ * Returns 0 on success (BAR registers didn't need to be initialized or were
+ * already initialized), or a negative error code on failure (the provided
+ * base address does not match the pre-programmed one, or couldn't zero out
+ * the memory).
+ */
+static int qm_set_memory(struct qman *qm, enum qm_memory memory, u64 ba,
+			 int enable, int prio, int stash, u32 size,
+			 bool *need_cleanup)
 {
 	u32 offset = (memory == qm_memory_fqd) ? REG_FQD_BARE : REG_PFDR_BARE;
 	u32 exp = ilog2(size);
+	u32 bar, bare;
+	void *ptr;
+
 	/* choke if size isn't within range */
 	DPA_ASSERT((size >= 4096) && (size <= 1073741824) &&
 		   is_power_of_2(size));
 	/* choke if 'ba' has lower-alignment than 'size' */
 	DPA_ASSERT(!(ba & (size - 1)));
+
+	/* Check to see if the QMan has already been initialized */
+	bar = __qm_in(qm, offset + REG_offset_BAR);
+	if (bar) {
+		/* Make sure the base address hasn't changed */
+		bare = __qm_in(qm, offset);
+		if (bare != upper_32_bits(ba) || bar != lower_32_bits(ba)) {
+			pr_err("Attempted to reinitialize QMan with different BAR, got 0x%llx read BARE=0x%x BAR=0x%x\n",
+			       ba, bare, bar);
+			return -ENOMEM;
+		}
+
+		pr_devel("QMan BAR previously initialized with BARE=0x%x BAR=0x%x\n",
+			 bare, bar);
+
+		*need_cleanup = true;
+
+		return 0;
+	}
+
+	/* Need to temporarily map the area to make sure it is zeroed */
+	ptr = memremap(ba, size, MEMREMAP_WB);
+	if (!ptr) {
+		pr_crit("memremap() of QMan private memory failed\n");
+		return -ENOMEM;
+	}
+	memset(ptr, 0, size);
+	memunmap(ptr);
+
 	__qm_out(qm, offset, upper_32_bits(ba));
 	__qm_out(qm, offset + REG_offset_BAR, lower_32_bits(ba));
 	__qm_out(qm, offset + REG_offset_AR,
@@ -397,6 +442,7 @@ static void qm_set_memory(struct qman *qm, enum qm_memory memory, u64 ba,
 		 (prio ? 0x40000000 : 0) |
 		 (stash ? 0x20000000 : 0) |
 		 (exp - 1));
+	return 0;
 }
 
 static void qm_set_pfdr_threshold(struct qman *qm, u32 th, u8 k)
@@ -739,7 +785,7 @@ static int __bind_irq(void)
 	return 0;
 }
 
-int qman_init_ccsr(struct device_node *node)
+int qman_init_ccsr(struct device_node *node, bool *need_cleanup)
 {
 	int ret;
 	if (!qman_have_ccsr())
@@ -752,10 +798,24 @@ int qman_init_ccsr(struct device_node *node)
 	qm_out(QCSP_BAR, 0x0);
 #endif
 	/* FQD memory */
-	qm_set_memory(qm, qm_memory_fqd, fqd_a, 1, 0, 0, fqd_sz);
+	ret = qm_set_memory(qm, qm_memory_fqd, fqd_a, 1, 0, 0, fqd_sz,
+			    need_cleanup);
+	if (ret < 0)
+		return ret;
+
 	/* PFDR memory */
-	qm_set_memory(qm, qm_memory_pfdr, pfdr_a, 1, 0, 0, pfdr_sz);
-	qm_init_pfdr(qm, 8, pfdr_sz / 64 - 8);
+	ret = qm_set_memory(qm, qm_memory_pfdr, pfdr_a, 1, 0, 0, pfdr_sz,
+			    need_cleanup);
+	if (ret < 0)
+		return ret;
+
+	/* Don't reinitialize the PFDRs if the PFDR BAR was pre-configured */
+	if (!*need_cleanup) {
+		ret = qm_init_pfdr(qm, 8, pfdr_sz / 64 - 8);
+		if (ret)
+			return ret;
+	}
+
 	/* thresholds */
 	qm_set_pfdr_threshold(qm, 512, 64);
 	qm_set_sfdr_threshold(qm, 128);
@@ -612,13 +612,14 @@ void qm_put_unused_portal(struct qm_portal_config *pcfg)
 	spin_unlock(&unused_pcfgs_lock);
 }
 
-static struct qman_portal *init_pcfg(struct qm_portal_config *pcfg)
+static struct qman_portal *init_pcfg(struct qm_portal_config *pcfg,
+				     bool need_cleanup)
 {
 	struct qman_portal *p;
 
 	pcfg->iommu_domain = NULL;
 	portal_set_cpu(pcfg, pcfg->public_cfg.cpu);
-	p = qman_create_affine_portal(pcfg, NULL);
+	p = qman_create_affine_portal(pcfg, NULL, need_cleanup);
 	if (p) {
 		u32 irq_sources = 0;
 		/* Determine what should be interrupt-vs-poll driven */
@@ -732,13 +733,14 @@ __init int qman_init(void)
 	struct device_node *dn;
 	struct qm_portal_config *pcfg;
 	struct qman_portal *p;
-	int cpu, ret;
+	int cpu, ret, i;
 	const u32 *clk;
 	struct cpumask offline_cpus;
+	bool need_cleanup = false;
 
 	/* Initialise the Qman (CCSR) device */
 	for_each_compatible_node(dn, NULL, "fsl,qman") {
-		if (!qman_init_ccsr(dn))
+		if (!qman_init_ccsr(dn, &need_cleanup))
 			pr_info("Qman err interrupt handler present\n");
 		else
 			pr_err("Qman CCSR setup failed\n");
@@ -849,7 +851,7 @@ __init int qman_init(void)
 	}
 	list_for_each_entry(pcfg, &unshared_pcfgs, list) {
 		pcfg->public_cfg.is_shared = 0;
-		p = init_pcfg(pcfg);
+		p = init_pcfg(pcfg, need_cleanup);
 		if (!p) {
 			pr_crit("Unable to configure portals\n");
 			return 0;
@@ -857,7 +859,7 @@ __init int qman_init(void)
 	}
 	list_for_each_entry(pcfg, &shared_pcfgs, list) {
 		pcfg->public_cfg.is_shared = 1;
-		p = init_pcfg(pcfg);
+		p = init_pcfg(pcfg, need_cleanup);
 		if (p)
 			shared_portals[num_shared_portals++] = p;
 	}
@@ -877,6 +879,24 @@ __init int qman_init(void)
 		return ret;
 	}
 #endif
+
+	if (need_cleanup) {
+		size_t num_fqs = get_qman_fqd_size() / 64;
+
+		pr_info("QMan wasn't reset prior to boot, shutting down %zu FQs\n",
+			num_fqs);
+
+		for (i = 0; i < num_fqs; i++) {
+			ret = qman_shutdown_fq(i);
+			if (ret) {
+				pr_err("QMan: Failed to shutdown frame queue %d: %pe\n",
+				       i, ERR_PTR(ret));
+			}
+		}
+		pr_info("QMan: shutdown finished, enabling IRQs\n");
+		qman_enable_irqs();
+	}
+
 	return 0;
 }
 
@@ -447,6 +447,21 @@ static inline void hw_ccgr_query_to_cpu(struct qm_mcr_ceetm_ccgr_query *ccgr_q)
 			be32_to_cpu(ccgr_q->cm_query.cscn_targ_swp[i]);
 }
 
+void qman_enable_irqs(void)
+{
+	struct qman_portal *p;
+	int i;
+
+	for (i = 0; i < NR_CPUS; i++) {
+		if (affine_portals[i]) {
+			p = (struct qman_portal *)affine_portals[i];
+			qm_isr_status_clear(&p->p, 0xffffffff);
+			qm_isr_uninhibit(&p->p);
+		}
+	}
+	pr_debug("QMan: IRQs enabled\n");
+}
+
 /* In the case that slow- and fast-path handling are both done by qman_poll()
  * (ie. because there is no interrupt handling), we ought to balance how often
  * we do the fast-path poll versus the slow-path poll. We'll use two decrementer
@@ -570,7 +585,8 @@ struct dev_pm_domain qman_portal_device_pm_domain = {
 struct qman_portal *qman_create_portal(
 			struct qman_portal *portal,
 			const struct qm_portal_config *config,
-			const struct qman_cgrs *cgrs)
+			const struct qman_cgrs *cgrs,
+			bool need_cleanup)
 {
 	struct qm_portal *__p;
 	char buf[16];
@@ -694,8 +710,8 @@ struct qman_portal *qman_create_portal(
 	qm_isr_disable_write(__p, isdr);
 	portal->irq_sources = 0;
 	qm_isr_enable_write(__p, portal->irq_sources);
-	qm_isr_status_clear(__p, 0xffffffff);
 	snprintf(portal->irqname, MAX_IRQNAME, IRQNAME, config->public_cfg.cpu);
+	qm_isr_inhibit(__p);
 	if (request_irq(config->public_cfg.irq, portal_isr, 0, portal->irqname,
 			portal)) {
 		pr_err("request_irq() failed\n");
@@ -743,8 +759,12 @@ struct qman_portal *qman_create_portal(
 	 * If left enabled they cause problems with sleep mode. Since
 	 * they are not used in push mode we can safely turn them off
 	 */
+	qm_isr_status_clear(__p, 0xffffffff);
 	qm_isr_disable_write(__p, QM_DQAVAIL_MASK);
-	qm_isr_uninhibit(__p);
+	if (!need_cleanup) {
+		pr_info("QMan doesn't need cleanup, uninhibiting IRQs\n");
+		qm_isr_uninhibit(__p);
+	}
 	/* Write a sane SDQCR */
 	qm_dqrr_sdqcr_set(__p, portal->sdqcr);
 	return portal;
@@ -778,13 +798,14 @@ fail_eqcr:
 
 struct qman_portal *qman_create_affine_portal(
 			const struct qm_portal_config *config,
-			const struct qman_cgrs *cgrs)
+			const struct qman_cgrs *cgrs,
+			bool need_cleanup)
 {
 	struct qman_portal *res;
 	struct qman_portal *portal;
 
 	portal = &per_cpu(qman_affine_portal, config->public_cfg.cpu);
-	res = qman_create_portal(portal, config, cgrs);
+	res = qman_create_portal(portal, config, cgrs, need_cleanup);
 	if (res) {
 		spin_lock(&affine_mask_lock);
 		cpumask_set_cpu(config->public_cfg.cpu, &affine_mask);
@@ -212,7 +212,7 @@ extern u16 qman_portal_max;
 
 #ifdef CONFIG_FSL_QMAN_CONFIG
 /* Hooks from qman_driver.c to qman_config.c */
-int qman_init_ccsr(struct device_node *node);
+int qman_init_ccsr(struct device_node *node, bool *need_cleanup);
 void qman_liodn_fixup(u16 channel);
 int qman_set_sdest(u16 channel, unsigned int cpu_idx);
 size_t get_qman_fqd_size(void);
@@ -230,15 +230,18 @@ int qm_get_wpm(int *wpm);
 struct qman_portal *qman_create_portal(
 			struct qman_portal *portal,
 			const struct qm_portal_config *config,
-			const struct qman_cgrs *cgrs);
+			const struct qman_cgrs *cgrs,
+			bool need_cleanup);
 
 struct qman_portal *qman_create_affine_portal(
 			const struct qm_portal_config *config,
-			const struct qman_cgrs *cgrs);
+			const struct qman_cgrs *cgrs,
+			bool need_cleanup);
 struct qman_portal *qman_create_affine_slave(struct qman_portal *redirect,
 			int cpu);
 const struct qm_portal_config *qman_destroy_affine_portal(void);
 void qman_destroy_portal(struct qman_portal *qm);
+void qman_enable_irqs(void);
 
 /* Hooks from fsl_usdpaa.c to qman_driver.c */
 struct qm_portal_config *qm_get_unused_portal(void);