Merge 08b8f206de ("bpf: Fix truncation bug in coerce_reg_to_size_sx()") into android15-6.6-lts

Steps on the way to 6.6.59

Change-Id: Ic3151f7161ba1c9f9bbc9a3e8a6b244ea4e4eea1
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
commit c73311cb64
Author: Greg Kroah-Hartman <gregkh@google.com>
Date:   2024-11-29 12:46:33 +00:00
39 changed files with 318 additions and 184 deletions


@@ -272,18 +272,19 @@ static void __zpci_event_error(struct zpci_ccdf_err *ccdf)
 		goto no_pdev;
 
 	switch (ccdf->pec) {
-	case 0x003a: /* Service Action or Error Recovery Successful */
+	case 0x002a: /* Error event concerns FMB */
+	case 0x002b:
+	case 0x002c:
+		break;
+	case 0x0040: /* Service Action or Error Recovery Failed */
+	case 0x003b:
+		zpci_event_io_failure(pdev, pci_channel_io_perm_failure);
+		break;
+	default: /* PCI function left in the error state attempt to recover */
 		ers_res = zpci_event_attempt_error_recovery(pdev);
 		if (ers_res != PCI_ERS_RESULT_RECOVERED)
 			zpci_event_io_failure(pdev, pci_channel_io_perm_failure);
 		break;
-	default:
-		/*
-		 * Mark as frozen not permanently failed because the device
-		 * could be subsequently recovered by the platform.
-		 */
-		zpci_event_io_failure(pdev, pci_channel_io_frozen);
-		break;
 	}
 	pci_dev_put(pdev);
 no_pdev:


@@ -496,7 +496,7 @@ static int encode_addr_size_pairs(struct dma_xfer *xfer, struct wrapper_list *wr
 	nents = sgt->nents;
 	nents_dma = nents;
 	*size = QAIC_MANAGE_EXT_MSG_LENGTH - msg_hdr_len - sizeof(**out_trans);
-	for_each_sgtable_sg(sgt, sg, i) {
+	for_each_sgtable_dma_sg(sgt, sg, i) {
 		*size -= sizeof(*asp);
 		/* Save 1K for possible follow-up transactions. */
 		if (*size < SZ_1K) {


@@ -177,7 +177,7 @@ static int clone_range_of_sgt_for_slice(struct qaic_device *qdev, struct sg_tabl
 	nents = 0;
 	size = size ? size : PAGE_SIZE;
-	for (sg = sgt_in->sgl; sg; sg = sg_next(sg)) {
+	for_each_sgtable_dma_sg(sgt_in, sg, j) {
 		len = sg_dma_len(sg);
 		if (!len)
@@ -214,7 +214,7 @@ static int clone_range_of_sgt_for_slice(struct qaic_device *qdev, struct sg_tabl
 	/* copy relevant sg node and fix page and length */
 	sgn = sgf;
-	for_each_sgtable_sg(sgt, sg, j) {
+	for_each_sgtable_dma_sg(sgt, sg, j) {
 		memcpy(sg, sgn, sizeof(*sg));
 		if (sgn == sgf) {
 			sg_dma_address(sg) += offf;
@@ -294,7 +294,7 @@ static int encode_reqs(struct qaic_device *qdev, struct bo_slice *slice,
 	 * fence.
 	 */
 	dev_addr = req->dev_addr;
-	for_each_sgtable_sg(slice->sgt, sg, i) {
+	for_each_sgtable_dma_sg(slice->sgt, sg, i) {
 		slice->reqs[i].cmd = cmd;
 		slice->reqs[i].src_addr = cpu_to_le64(slice->dir == DMA_TO_DEVICE ?
 						      sg_dma_address(sg) : dev_addr);


@@ -2634,10 +2634,8 @@ static struct scmi_debug_info *scmi_debugfs_common_setup(struct scmi_info *info)
 	dbg->top_dentry = top_dentry;
 
 	if (devm_add_action_or_reset(info->dev,
-				     scmi_debugfs_common_cleanup, dbg)) {
-		scmi_debugfs_common_cleanup(dbg);
+				     scmi_debugfs_common_cleanup, dbg))
 		return NULL;
-	}
 
 	return dbg;
 }
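
Note on the contract this hunk relies on: devm_add_action_or_reset() already invokes the action itself when registration fails, so the explicit cleanup call removed above was a double cleanup. A minimal sketch of that contract, with illustrative setup()/cleanup() names (not from the patch):

#include <linux/device.h>

static void cleanup(void *data)
{
	/* undo whatever was prepared for @data */
}

static void *setup(struct device *dev, void *dbg)
{
	/* On failure the action has already run; just bail out.
	 * Calling cleanup(dbg) again here would be a double cleanup. */
	if (devm_add_action_or_reset(dev, cleanup, dbg))
		return NULL;
	return dbg;
}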


@@ -1053,8 +1053,10 @@ int amdgpu_mes_add_ring(struct amdgpu_device *adev, int gang_id,
 	r = amdgpu_ring_init(adev, ring, 1024, NULL, 0,
 			     AMDGPU_RING_PRIO_DEFAULT, NULL);
-	if (r)
+	if (r) {
+		amdgpu_mes_unlock(&adev->mes);
 		goto clean_up_memory;
+	}
 
 	amdgpu_mes_ring_to_queue_props(adev, ring, &qprops);
@@ -1087,7 +1089,6 @@ clean_up_ring:
 	amdgpu_ring_fini(ring);
 clean_up_memory:
 	kfree(ring);
-	amdgpu_mes_unlock(&adev->mes);
 	return r;
 }


@@ -722,12 +722,13 @@ void dpu_crtc_complete_commit(struct drm_crtc *crtc)
 	_dpu_crtc_complete_flip(crtc);
 }
 
-static void _dpu_crtc_setup_lm_bounds(struct drm_crtc *crtc,
-		struct drm_crtc_state *state)
+static int _dpu_crtc_check_and_setup_lm_bounds(struct drm_crtc *crtc,
+		struct drm_crtc_state *state)
 {
 	struct dpu_crtc_state *cstate = to_dpu_crtc_state(state);
 	struct drm_display_mode *adj_mode = &state->adjusted_mode;
 	u32 crtc_split_width = adj_mode->hdisplay / cstate->num_mixers;
+	struct dpu_kms *dpu_kms = _dpu_crtc_get_kms(crtc);
 	int i;
 
 	for (i = 0; i < cstate->num_mixers; i++) {
@@ -738,7 +739,12 @@ static void _dpu_crtc_setup_lm_bounds(struct drm_crtc *crtc,
 		r->y2 = adj_mode->vdisplay;
 
 		trace_dpu_crtc_setup_lm_bounds(DRMID(crtc), i, r);
+
+		if (drm_rect_width(r) > dpu_kms->catalog->caps->max_mixer_width)
+			return -E2BIG;
 	}
+
+	return 0;
 }
 
 static void _dpu_crtc_get_pcc_coeff(struct drm_crtc_state *state,
@@ -814,7 +820,7 @@ static void dpu_crtc_atomic_begin(struct drm_crtc *crtc,
 	DRM_DEBUG_ATOMIC("crtc%d\n", crtc->base.id);
 
-	_dpu_crtc_setup_lm_bounds(crtc, crtc->state);
+	_dpu_crtc_check_and_setup_lm_bounds(crtc, crtc->state);
 
 	/* encoder will trigger pending mask now */
 	drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
@@ -1208,8 +1214,11 @@ static int dpu_crtc_atomic_check(struct drm_crtc *crtc,
 	if (crtc_state->active_changed)
 		crtc_state->mode_changed = true;
 
-	if (cstate->num_mixers)
-		_dpu_crtc_setup_lm_bounds(crtc, crtc_state);
+	if (cstate->num_mixers) {
+		rc = _dpu_crtc_check_and_setup_lm_bounds(crtc, crtc_state);
+		if (rc)
+			return rc;
+	}
 
 	/* FIXME: move this to dpu_plane_atomic_check? */
 	drm_atomic_crtc_state_for_each_plane_state(plane, pstate, crtc_state) {


@@ -1122,21 +1122,20 @@ static void dpu_encoder_virt_atomic_mode_set(struct drm_encoder *drm_enc,
 	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
 		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
 
-		if (!dpu_enc->hw_pp[i]) {
+		phys->hw_pp = dpu_enc->hw_pp[i];
+		if (!phys->hw_pp) {
 			DPU_ERROR_ENC(dpu_enc,
 				"no pp block assigned at idx: %d\n", i);
 			return;
 		}
 
-		if (!hw_ctl[i]) {
+		phys->hw_ctl = i < num_ctl ? to_dpu_hw_ctl(hw_ctl[i]) : NULL;
+		if (!phys->hw_ctl) {
 			DPU_ERROR_ENC(dpu_enc,
 				"no ctl block assigned at idx: %d\n", i);
 			return;
 		}
 
-		phys->hw_pp = dpu_enc->hw_pp[i];
-		phys->hw_ctl = to_dpu_hw_ctl(hw_ctl[i]);
-
 		phys->cached_mode = crtc_state->adjusted_mode;
 		if (phys->ops.atomic_mode_set)
 			phys->ops.atomic_mode_set(phys, crtc_state, conn_state);


@@ -26,7 +26,7 @@ static void msm_disp_state_dump_regs(u32 **reg, u32 aligned_len, void __iomem *b
 	end_addr = base_addr + aligned_len;
 
 	if (!(*reg))
-		*reg = kzalloc(len_padded, GFP_KERNEL);
+		*reg = kvzalloc(len_padded, GFP_KERNEL);
 
 	if (*reg)
 		dump_addr = *reg;
@@ -48,20 +48,21 @@ static void msm_disp_state_dump_regs(u32 **reg, u32 aligned_len, void __iomem *b
 	}
 }
 
-static void msm_disp_state_print_regs(u32 **reg, u32 len, void __iomem *base_addr,
-		struct drm_printer *p)
+static void msm_disp_state_print_regs(const u32 *dump_addr, u32 len,
+		void __iomem *base_addr, struct drm_printer *p)
 {
 	int i;
-	u32 *dump_addr = NULL;
 	void __iomem *addr;
 	u32 num_rows;
 
+	if (!dump_addr) {
+		drm_printf(p, "Registers not stored\n");
+		return;
+	}
+
 	addr = base_addr;
 	num_rows = len / REG_DUMP_ALIGN;
 
-	if (*reg)
-		dump_addr = *reg;
-
 	for (i = 0; i < num_rows; i++) {
 		drm_printf(p, "0x%lx : %08x %08x %08x %08x\n",
 			   (unsigned long)(addr - base_addr),
@@ -89,7 +90,7 @@ void msm_disp_state_print(struct msm_disp_state *state, struct drm_printer *p)
 	list_for_each_entry_safe(block, tmp, &state->blocks, node) {
 		drm_printf(p, "====================%s================\n", block->name);
-		msm_disp_state_print_regs(&block->state, block->size, block->base_addr, p);
+		msm_disp_state_print_regs(block->state, block->size, block->base_addr, p);
 	}
 
 	drm_printf(p, "===================dpu drm state================\n");
@@ -161,7 +162,7 @@ void msm_disp_state_free(void *data)
 	list_for_each_entry_safe(block, tmp, &disp_state->blocks, node) {
 		list_del(&block->node);
-		kfree(block->state);
+		kvfree(block->state);
 		kfree(block);
 	}


@@ -537,7 +537,7 @@ static unsigned long dsi_adjust_pclk_for_compression(const struct drm_display_mo
 	int new_htotal = mode->htotal - mode->hdisplay + new_hdisplay;
 
-	return new_htotal * mode->vtotal * drm_mode_vrefresh(mode);
+	return mult_frac(mode->clock * 1000u, new_htotal, mode->htotal);
 }
 
 static unsigned long dsi_get_pclk_rate(const struct drm_display_mode *mode,
@@ -545,7 +545,7 @@ static unsigned long dsi_get_pclk_rate(const struct drm_display_mode *mode,
 {
 	unsigned long pclk_rate;
 
-	pclk_rate = mode->clock * 1000;
+	pclk_rate = mode->clock * 1000u;
 
 	if (dsc)
 		pclk_rate = dsi_adjust_pclk_for_compression(mode, dsc);
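
Why mult_frac() here: instead of recomputing the clock from vrefresh, the new code scales the pixel clock by new_htotal/htotal without ever forming the full x * n product, which could overflow. A standalone demo of the difference; the macro body is copied in so it compiles on its own (GNU C, as in the kernel), and the clock numbers are illustrative, not a real panel mode:

#include <stdio.h>
#include <stdint.h>

/* The kernel's mult_frac(): computes x * n / d while keeping the
 * intermediate product small, by splitting x into quotient and
 * remainder with respect to d. */
#define mult_frac(x, n, d)			\
({						\
	typeof(x) quot = (x) / (d);		\
	typeof(x) rem  = (x) % (d);		\
	(quot * (n)) + ((rem * (n)) / (d));	\
})

int main(void)
{
	uint32_t pclk_hz = 300000000u;		/* mode->clock * 1000 */
	uint32_t new_htotal = 1100, htotal = 2000;

	uint32_t naive = pclk_hz * new_htotal / htotal;	/* product wraps u32 */
	uint32_t safe  = mult_frac(pclk_hz, new_htotal, htotal);

	printf("naive=%u safe=%u\n", naive, safe);	/* 1791242 vs 165000000 */
	return 0;
}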


@@ -881,6 +881,10 @@ static int vmw_stdu_connector_atomic_check(struct drm_connector *conn,
 	struct drm_crtc_state *new_crtc_state;
 
 	conn_state = drm_atomic_get_connector_state(state, conn);
+
+	if (IS_ERR(conn_state))
+		return PTR_ERR(conn_state);
+
 	du = vmw_connector_to_stdu(conn);
 
 	if (!conn_state->crtc)


@@ -82,25 +82,26 @@ config ADMV1014
 	  module will be called admv1014.
 
 config ADMV4420
 	tristate "Analog Devices ADMV4420 K Band Downconverter"
 	depends on SPI
+	select REGMAP_SPI
 	help
 	  Say yes here to build support for Analog Devices K Band
 	  Downconverter with integrated Fractional-N PLL and VCO.
 
 	  To compile this driver as a module, choose M here: the
 	  module will be called admv4420.
 
 config ADRF6780
 	tristate "Analog Devices ADRF6780 Microwave Upconverter"
 	depends on SPI
 	depends on COMMON_CLK
 	help
 	  Say yes here to build support for Analog Devices ADRF6780
 	  5.9 GHz to 23.6 GHz, Wideband, Microwave Upconverter.
 
 	  To compile this driver as a module, choose M here: the
 	  module will be called adrf6780.
 
 endmenu
 endmenu


@@ -366,12 +366,12 @@ int bnxt_re_ib_get_hw_stats(struct ib_device *ibdev,
 				goto done;
 			}
 		}
-		if (rdev->pacing.dbr_pacing)
+		if (rdev->pacing.dbr_pacing && bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx))
 			bnxt_re_copy_db_pacing_stats(rdev, stats);
 	}
 
done:
-	return bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx) ?
+	return bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx) ?
 		BNXT_RE_NUM_EXT_COUNTERS : BNXT_RE_NUM_STD_COUNTERS;
 }
 
@@ -381,7 +381,7 @@ struct rdma_hw_stats *bnxt_re_ib_alloc_hw_port_stats(struct ib_device *ibdev,
 	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
 	int num_counters = 0;
 
-	if (bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx))
+	if (bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx))
 		num_counters = BNXT_RE_NUM_EXT_COUNTERS;
 	else
 		num_counters = BNXT_RE_NUM_STD_COUNTERS;


@@ -1023,7 +1023,7 @@ static int bnxt_re_init_user_qp(struct bnxt_re_dev *rdev, struct bnxt_re_pd *pd,
 	bytes = (qplib_qp->sq.max_wqe * qplib_qp->sq.wqe_size);
 	/* Consider mapping PSN search memory only for RC QPs. */
 	if (qplib_qp->type == CMDQ_CREATE_QP_TYPE_RC) {
-		psn_sz = bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx) ?
+		psn_sz = bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx) ?
			 sizeof(struct sq_psn_search_ext) :
			 sizeof(struct sq_psn_search);
		psn_nume = (qplib_qp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC) ?
@@ -1234,7 +1234,7 @@ static void bnxt_re_adjust_gsi_rq_attr(struct bnxt_re_qp *qp)
 	qplqp = &qp->qplib_qp;
 	dev_attr = &rdev->dev_attr;
 
-	if (!bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx)) {
+	if (!bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx)) {
 		qplqp->rq.max_sge = dev_attr->max_qp_sges;
 		if (qplqp->rq.max_sge > dev_attr->max_qp_sges)
 			qplqp->rq.max_sge = dev_attr->max_qp_sges;
@@ -1301,7 +1301,7 @@ static void bnxt_re_adjust_gsi_sq_attr(struct bnxt_re_qp *qp,
 	qplqp = &qp->qplib_qp;
 	dev_attr = &rdev->dev_attr;
 
-	if (!bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx)) {
+	if (!bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx)) {
 		entries = bnxt_re_init_depth(init_attr->cap.max_send_wr + 1, uctx);
 		qplqp->sq.max_wqe = min_t(u32, entries,
 					  dev_attr->max_qp_wqes + 1);
@@ -1328,7 +1328,7 @@ static int bnxt_re_init_qp_type(struct bnxt_re_dev *rdev,
 		goto out;
 	}
 
-	if (bnxt_qplib_is_chip_gen_p5(chip_ctx) &&
+	if (bnxt_qplib_is_chip_gen_p5_p7(chip_ctx) &&
 	    init_attr->qp_type == IB_QPT_GSI)
 		qptype = CMDQ_CREATE_QP_TYPE_GSI;
 out:
@@ -1527,7 +1527,7 @@ int bnxt_re_create_qp(struct ib_qp *ib_qp, struct ib_qp_init_attr *qp_init_attr,
 		goto fail;
 
 	if (qp_init_attr->qp_type == IB_QPT_GSI &&
-	    !(bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx))) {
+	    !(bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx))) {
 		rc = bnxt_re_create_gsi_qp(qp, pd, qp_init_attr);
 		if (rc == -ENODEV)
 			goto qp_destroy;


@@ -107,8 +107,11 @@ static void bnxt_re_set_db_offset(struct bnxt_re_dev *rdev)
 		dev_info(rdev_to_dev(rdev),
 			 "Couldn't get DB bar size, Low latency framework is disabled\n");
 	/* set register offsets for both UC and WC */
-	res->dpi_tbl.ucreg.offset = res->is_vf ? BNXT_QPLIB_DBR_VF_DB_OFFSET :
-						 BNXT_QPLIB_DBR_PF_DB_OFFSET;
+	if (bnxt_qplib_is_chip_gen_p7(cctx))
+		res->dpi_tbl.ucreg.offset = offset;
+	else
+		res->dpi_tbl.ucreg.offset = res->is_vf ? BNXT_QPLIB_DBR_VF_DB_OFFSET :
+							 BNXT_QPLIB_DBR_PF_DB_OFFSET;
 	res->dpi_tbl.wcreg.offset = res->dpi_tbl.ucreg.offset;
 
 	/* If WC mapping is disabled by L2 driver then en_dev->l2_db_size
@@ -128,7 +131,7 @@ static void bnxt_re_set_drv_mode(struct bnxt_re_dev *rdev, u8 mode)
 	struct bnxt_qplib_chip_ctx *cctx;
 
 	cctx = rdev->chip_ctx;
-	cctx->modes.wqe_mode = bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx) ?
+	cctx->modes.wqe_mode = bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx) ?
			       mode : BNXT_QPLIB_WQE_MODE_STATIC;
 	if (bnxt_re_hwrm_qcaps(rdev))
 		dev_err(rdev_to_dev(rdev),
@@ -218,7 +221,7 @@ static void bnxt_re_limit_pf_res(struct bnxt_re_dev *rdev)
 	ctx->srqc_count = min_t(u32, BNXT_RE_MAX_SRQC_COUNT,
				attr->max_srq);
 	ctx->cq_count = min_t(u32, BNXT_RE_MAX_CQ_COUNT, attr->max_cq);
-	if (!bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx))
+	if (!bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx))
 		for (i = 0; i < MAX_TQM_ALLOC_REQ; i++)
 			rdev->qplib_ctx.tqm_ctx.qcount[i] =
			rdev->dev_attr.tqm_alloc_reqs[i];
@@ -267,7 +270,7 @@ static void bnxt_re_set_resource_limits(struct bnxt_re_dev *rdev)
 	memset(&rdev->qplib_ctx.vf_res, 0, sizeof(struct bnxt_qplib_vf_res));
 	bnxt_re_limit_pf_res(rdev);
 
-	num_vfs = bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx) ?
+	num_vfs = bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx) ?
		  BNXT_RE_GEN_P5_MAX_VF : rdev->num_vfs;
 	if (num_vfs)
 		bnxt_re_limit_vf_res(&rdev->qplib_ctx, num_vfs);
@@ -279,7 +282,7 @@ static void bnxt_re_vf_res_config(struct bnxt_re_dev *rdev)
 	if (test_bit(BNXT_RE_FLAG_ERR_DEVICE_DETACHED, &rdev->flags))
 		return;
 	rdev->num_vfs = pci_sriov_get_totalvfs(rdev->en_dev->pdev);
-	if (!bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx)) {
+	if (!bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx)) {
 		bnxt_re_set_resource_limits(rdev);
 		bnxt_qplib_set_func_resources(&rdev->qplib_res, &rdev->rcfw,
					      &rdev->qplib_ctx);
@@ -1070,16 +1073,6 @@ static int bnxt_re_cqn_handler(struct bnxt_qplib_nq *nq,
 	return 0;
 }
 
-#define BNXT_RE_GEN_P5_PF_NQ_DB		0x10000
-#define BNXT_RE_GEN_P5_VF_NQ_DB		0x4000
-
-static u32 bnxt_re_get_nqdb_offset(struct bnxt_re_dev *rdev, u16 indx)
-{
-	return bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx) ?
-		(rdev->is_virtfn ? BNXT_RE_GEN_P5_VF_NQ_DB :
-				   BNXT_RE_GEN_P5_PF_NQ_DB) :
-		rdev->en_dev->msix_entries[indx].db_offset;
-}
-
 static void bnxt_re_cleanup_res(struct bnxt_re_dev *rdev)
 {
 	int i;
@@ -1100,7 +1093,7 @@ static int bnxt_re_init_res(struct bnxt_re_dev *rdev)
 	bnxt_qplib_init_res(&rdev->qplib_res);
 	for (i = 1; i < rdev->num_msix ; i++) {
-		db_offt = bnxt_re_get_nqdb_offset(rdev, i);
+		db_offt = rdev->en_dev->msix_entries[i].db_offset;
 		rc = bnxt_qplib_enable_nq(rdev->en_dev->pdev, &rdev->nq[i - 1],
					  i - 1, rdev->en_dev->msix_entries[i].vector,
					  db_offt, &bnxt_re_cqn_handler,
@@ -1511,7 +1504,7 @@ static int bnxt_re_dev_init(struct bnxt_re_dev *rdev, u8 wqe_mode)
 		ibdev_err(&rdev->ibdev, "Failed to allocate CREQ: %#x\n", rc);
 		goto free_rcfw;
 	}
-	db_offt = bnxt_re_get_nqdb_offset(rdev, BNXT_RE_AEQ_IDX);
+	db_offt = rdev->en_dev->msix_entries[BNXT_RE_AEQ_IDX].db_offset;
 	vid = rdev->en_dev->msix_entries[BNXT_RE_AEQ_IDX].vector;
 	rc = bnxt_qplib_enable_rcfw_channel(&rdev->rcfw,
					    vid, db_offt,
@@ -1539,7 +1532,7 @@ static int bnxt_re_dev_init(struct bnxt_re_dev *rdev, u8 wqe_mode)
 		bnxt_re_set_resource_limits(rdev);
 		rc = bnxt_qplib_alloc_ctx(&rdev->qplib_res, &rdev->qplib_ctx, 0,
-					  bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx));
+					  bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx));
 		if (rc) {
 			ibdev_err(&rdev->ibdev,
				  "Failed to allocate QPLIB context: %#x\n", rc);
@@ -1662,7 +1655,7 @@ static void bnxt_re_setup_cc(struct bnxt_re_dev *rdev, bool enable)
 		return;
 
 	/* Currently enabling only for GenP5 adapters */
-	if (!bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx))
+	if (!bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx))
 		return;
 
 	if (enable) {


@@ -995,7 +995,7 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
 	/* SQ */
 	if (qp->type == CMDQ_CREATE_QP_TYPE_RC) {
-		psn_sz = bnxt_qplib_is_chip_gen_p5(res->cctx) ?
+		psn_sz = bnxt_qplib_is_chip_gen_p5_p7(res->cctx) ?
			 sizeof(struct sq_psn_search_ext) :
			 sizeof(struct sq_psn_search);
@@ -1649,7 +1649,7 @@ static void bnxt_qplib_fill_psn_search(struct bnxt_qplib_qp *qp,
 	flg_npsn = ((swq->next_psn << SQ_PSN_SEARCH_NEXT_PSN_SFT) &
		    SQ_PSN_SEARCH_NEXT_PSN_MASK);
 
-	if (bnxt_qplib_is_chip_gen_p5(qp->cctx)) {
+	if (bnxt_qplib_is_chip_gen_p5_p7(qp->cctx)) {
 		psns_ext->opcode_start_psn = cpu_to_le32(op_spsn);
 		psns_ext->flags_next_psn = cpu_to_le32(flg_npsn);
 		psns_ext->start_slot_idx = cpu_to_le16(swq->slot_idx);
psns_ext->start_slot_idx = cpu_to_le16(swq->slot_idx); psns_ext->start_slot_idx = cpu_to_le16(swq->slot_idx);


@@ -525,7 +525,7 @@ static int __bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw,
 		/* failed with status */
 		dev_err(&rcfw->pdev->dev, "cmdq[%#x]=%#x status %#x\n",
			cookie, opcode, evnt->status);
-		rc = -EFAULT;
+		rc = -EIO;
 	}
 
 	return rc;
@@ -852,7 +852,7 @@ int bnxt_qplib_init_rcfw(struct bnxt_qplib_rcfw *rcfw,
 	 */
 	if (is_virtfn)
 		goto skip_ctx_setup;
-	if (bnxt_qplib_is_chip_gen_p5(rcfw->res->cctx))
+	if (bnxt_qplib_is_chip_gen_p5_p7(rcfw->res->cctx))
 		goto config_vf_res;
 
 	lvl = ctx->qpc_tbl.level;


@@ -257,22 +257,9 @@ int bnxt_qplib_alloc_init_hwq(struct bnxt_qplib_hwq *hwq,
 			dst_virt_ptr =
				(dma_addr_t **)hwq->pbl[PBL_LVL_0].pg_arr;
 			src_phys_ptr = hwq->pbl[PBL_LVL_1].pg_map_arr;
-			if (hwq_attr->type == HWQ_TYPE_MR) {
-			/* For MR it is expected that we supply only 1 contigous
-			 * page i.e only 1 entry in the PDL that will contain
-			 * all the PBLs for the user supplied memory region
-			 */
-				for (i = 0; i < hwq->pbl[PBL_LVL_1].pg_count;
-				     i++)
-					dst_virt_ptr[0][i] = src_phys_ptr[i] |
-						flag;
-			} else {
-				for (i = 0; i < hwq->pbl[PBL_LVL_1].pg_count;
-				     i++)
-					dst_virt_ptr[PTR_PG(i)][PTR_IDX(i)] =
-						src_phys_ptr[i] |
-						PTU_PDE_VALID;
-			}
+			for (i = 0; i < hwq->pbl[PBL_LVL_1].pg_count; i++)
+				dst_virt_ptr[0][i] = src_phys_ptr[i] | flag;
+
 			/* Alloc or init PTEs */
 			rc = __alloc_pbl(res, &hwq->pbl[PBL_LVL_2],
					 hwq_attr->sginfo);
@@ -807,7 +794,7 @@ static int bnxt_qplib_alloc_dpi_tbl(struct bnxt_qplib_res *res,
 	dpit = &res->dpi_tbl;
 	reg = &dpit->wcreg;
 
-	if (!bnxt_qplib_is_chip_gen_p5(res->cctx)) {
+	if (!bnxt_qplib_is_chip_gen_p5_p7(res->cctx)) {
 		/* Offest should come from L2 driver */
 		dbr_offset = dev_attr->l2_db_size;
 		dpit->ucreg.offset = dbr_offset;


@@ -44,6 +44,9 @@ extern const struct bnxt_qplib_gid bnxt_qplib_gid_zero;
 #define CHIP_NUM_57508		0x1750
 #define CHIP_NUM_57504		0x1751
 #define CHIP_NUM_57502		0x1752
+#define CHIP_NUM_58818		0xd818
+#define CHIP_NUM_57608		0x1760
+
 struct bnxt_qplib_drv_modes {
 	u8	wqe_mode;
@@ -296,6 +299,12 @@ struct bnxt_qplib_res {
 	struct bnxt_qplib_db_pacing_data *pacing_data;
 };
 
+static inline bool bnxt_qplib_is_chip_gen_p7(struct bnxt_qplib_chip_ctx *cctx)
+{
+	return (cctx->chip_num == CHIP_NUM_58818 ||
+		cctx->chip_num == CHIP_NUM_57608);
+}
+
 static inline bool bnxt_qplib_is_chip_gen_p5(struct bnxt_qplib_chip_ctx *cctx)
 {
 	return (cctx->chip_num == CHIP_NUM_57508 ||
@@ -303,15 +312,20 @@ static inline bool bnxt_qplib_is_chip_gen_p5(struct bnxt_qplib_chip_ctx *cctx)
		cctx->chip_num == CHIP_NUM_57502);
 }
 
+static inline bool bnxt_qplib_is_chip_gen_p5_p7(struct bnxt_qplib_chip_ctx *cctx)
+{
+	return bnxt_qplib_is_chip_gen_p5(cctx) || bnxt_qplib_is_chip_gen_p7(cctx);
+}
+
 static inline u8 bnxt_qplib_get_hwq_type(struct bnxt_qplib_res *res)
 {
-	return bnxt_qplib_is_chip_gen_p5(res->cctx) ?
+	return bnxt_qplib_is_chip_gen_p5_p7(res->cctx) ?
	       HWQ_TYPE_QUEUE : HWQ_TYPE_L2_CMPL;
 }
 
 static inline u8 bnxt_qplib_get_ring_type(struct bnxt_qplib_chip_ctx *cctx)
 {
-	return bnxt_qplib_is_chip_gen_p5(cctx) ?
+	return bnxt_qplib_is_chip_gen_p5_p7(cctx) ?
	       RING_ALLOC_REQ_RING_TYPE_NQ :
	       RING_ALLOC_REQ_RING_TYPE_ROCE_CMPL;
 }
@@ -488,7 +502,7 @@ static inline void bnxt_qplib_ring_nq_db(struct bnxt_qplib_db_info *info,
 	u32 type;
 
 	type = arm ? DBC_DBC_TYPE_NQ_ARM : DBC_DBC_TYPE_NQ;
-	if (bnxt_qplib_is_chip_gen_p5(cctx))
+	if (bnxt_qplib_is_chip_gen_p5_p7(cctx))
 		bnxt_qplib_ring_db(info, type);
 	else
 		bnxt_qplib_ring_db32(info, arm);


@@ -59,7 +59,7 @@ static bool bnxt_qplib_is_atomic_cap(struct bnxt_qplib_rcfw *rcfw)
 {
 	u16 pcie_ctl2 = 0;
 
-	if (!bnxt_qplib_is_chip_gen_p5(rcfw->res->cctx))
+	if (!bnxt_qplib_is_chip_gen_p5_p7(rcfw->res->cctx))
 		return false;
 
 	pcie_capability_read_word(rcfw->pdev, PCI_EXP_DEVCTL2, &pcie_ctl2);
@@ -133,10 +133,12 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,
 	 * reporting the max number
 	 */
 	attr->max_qp_wqes -= BNXT_QPLIB_RESERVED_QP_WRS + 1;
-	attr->max_qp_sges = bnxt_qplib_is_chip_gen_p5(rcfw->res->cctx) ?
+	attr->max_qp_sges = bnxt_qplib_is_chip_gen_p5_p7(rcfw->res->cctx) ?
			    6 : sb->max_sge;
 	attr->max_cq = le32_to_cpu(sb->max_cq);
 	attr->max_cq_wqes = le32_to_cpu(sb->max_cqe);
+	if (!bnxt_qplib_is_chip_gen_p7(rcfw->res->cctx))
+		attr->max_cq_wqes = min_t(u32, BNXT_QPLIB_MAX_CQ_WQES, attr->max_cq_wqes);
 	attr->max_cq_sges = attr->max_qp_sges;
 	attr->max_mr = le32_to_cpu(sb->max_mr);
 	attr->max_mw = le32_to_cpu(sb->max_mw);
@@ -151,9 +153,17 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,
 	attr->max_srq_sges = sb->max_srq_sge;
 	attr->max_pkey = 1;
 	attr->max_inline_data = le32_to_cpu(sb->max_inline_data);
-	attr->l2_db_size = (sb->l2_db_space_size + 1) *
-			    (0x01 << RCFW_DBR_BASE_PAGE_SHIFT);
+	if (!bnxt_qplib_is_chip_gen_p7(rcfw->res->cctx))
+		attr->l2_db_size = (sb->l2_db_space_size + 1) *
				    (0x01 << RCFW_DBR_BASE_PAGE_SHIFT);
+	/*
+	 * Read the max gid supported by HW.
+	 * For each entry in HW GID in HW table, we consume 2
+	 * GID entries in the kernel GID table. So max_gid reported
+	 * to stack can be up to twice the value reported by the HW, up to 256 gids.
+	 */
+	attr->max_sgid = le32_to_cpu(sb->max_gid);
+	attr->max_sgid = min_t(u32, BNXT_QPLIB_NUM_GIDS_SUPPORTED, 2 * attr->max_sgid);
 	attr->dev_cap_flags = le16_to_cpu(sb->dev_cap_flags);
 
 	bnxt_qplib_query_version(rcfw, attr->fw_ver);
@@ -934,7 +944,7 @@ int bnxt_qplib_modify_cc(struct bnxt_qplib_res *res,
 	req->inactivity_th = cpu_to_le16(cc_param->inact_th);
 
 	/* For chip gen P5 onwards fill extended cmd and header */
-	if (bnxt_qplib_is_chip_gen_p5(res->cctx)) {
+	if (bnxt_qplib_is_chip_gen_p5_p7(res->cctx)) {
 		struct roce_tlv *hdr;
 		u32 payload;
 		u32 chunks;


@@ -55,6 +55,7 @@ struct bnxt_qplib_dev_attr {
 	u32				max_qp_wqes;
 	u32				max_qp_sges;
 	u32				max_cq;
+#define BNXT_QPLIB_MAX_CQ_WQES		0xfffff
 	u32				max_cq_wqes;
 	u32				max_cq_sges;
 	u32				max_mr;


@@ -2086,7 +2086,7 @@ static int import_ep(struct c4iw_ep *ep, int iptype, __u8 *peer_ip,
 	err = -ENOMEM;
 	if (n->dev->flags & IFF_LOOPBACK) {
 		if (iptype == 4)
-			pdev = ip_dev_find(&init_net, *(__be32 *)peer_ip);
+			pdev = __ip_dev_find(&init_net, *(__be32 *)peer_ip, false);
 		else if (IS_ENABLED(CONFIG_IPV6))
 			for_each_netdev(&init_net, pdev) {
				if (ipv6_chk_addr(&init_net,
@@ -2101,12 +2101,12 @@ static int import_ep(struct c4iw_ep *ep, int iptype, __u8 *peer_ip,
			err = -ENODEV;
			goto out;
		}
+		if (is_vlan_dev(pdev))
+			pdev = vlan_dev_real_dev(pdev);
 		ep->l2t = cxgb4_l2t_get(cdev->rdev.lldi.l2t,
					n, pdev, rt_tos2priority(tos));
-		if (!ep->l2t) {
-			dev_put(pdev);
+		if (!ep->l2t)
			goto out;
-		}
 		ep->mtu = pdev->mtu;
 		ep->tx_chan = cxgb4_port_chan(pdev);
 		ep->smac_idx = ((struct port_info *)netdev_priv(pdev))->smt_idx;
@@ -2119,7 +2119,6 @@ static int import_ep(struct c4iw_ep *ep, int iptype, __u8 *peer_ip,
 		ep->rss_qid = cdev->rdev.lldi.rxq_ids[
			cxgb4_port_idx(pdev) * step];
 		set_tcp_window(ep, (struct port_info *)netdev_priv(pdev));
-		dev_put(pdev);
 	} else {
 		pdev = get_real_dev(n->dev);
 		ep->l2t = cxgb4_l2t_get(cdev->rdev.lldi.l2t,


@@ -3630,7 +3630,7 @@ void irdma_free_lsmm_rsrc(struct irdma_qp *iwqp)
 /**
  * irdma_accept - registered call for connection to be accepted
  * @cm_id: cm information for passive connection
- * @conn_param: accpet parameters
+ * @conn_param: accept parameters
  */
 int irdma_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
 {


@@ -68,6 +68,8 @@ MODULE_LICENSE("Dual BSD/GPL");
 static u64 srpt_service_guid;
 static DEFINE_SPINLOCK(srpt_dev_lock);	/* Protects srpt_dev_list. */
 static LIST_HEAD(srpt_dev_list);	/* List of srpt_device structures. */
+static DEFINE_MUTEX(srpt_mc_mutex);	/* Protects srpt_memory_caches. */
+static DEFINE_XARRAY(srpt_memory_caches); /* See also srpt_memory_cache_entry */
 
 static unsigned srp_max_req_size = DEFAULT_MAX_REQ_SIZE;
 module_param(srp_max_req_size, int, 0444);
@@ -105,6 +107,63 @@ static void srpt_recv_done(struct ib_cq *cq, struct ib_wc *wc);
 static void srpt_send_done(struct ib_cq *cq, struct ib_wc *wc);
 static void srpt_process_wait_list(struct srpt_rdma_ch *ch);
 
+/* Type of the entries in srpt_memory_caches. */
+struct srpt_memory_cache_entry {
+	refcount_t ref;
+	struct kmem_cache *c;
+};
+
+static struct kmem_cache *srpt_cache_get(unsigned int object_size)
+{
+	struct srpt_memory_cache_entry *e;
+	char name[32];
+	void *res;
+
+	guard(mutex)(&srpt_mc_mutex);
+	e = xa_load(&srpt_memory_caches, object_size);
+	if (e) {
+		refcount_inc(&e->ref);
+		return e->c;
+	}
+	snprintf(name, sizeof(name), "srpt-%u", object_size);
+	e = kmalloc(sizeof(*e), GFP_KERNEL);
+	if (!e)
+		return NULL;
+	refcount_set(&e->ref, 1);
+	e->c = kmem_cache_create(name, object_size, /*align=*/512, 0, NULL);
+	if (!e->c)
+		goto free_entry;
+	res = xa_store(&srpt_memory_caches, object_size, e, GFP_KERNEL);
+	if (xa_is_err(res))
+		goto destroy_cache;
+	return e->c;
+
+destroy_cache:
+	kmem_cache_destroy(e->c);
+
+free_entry:
+	kfree(e);
+	return NULL;
+}
+
+static void srpt_cache_put(struct kmem_cache *c)
+{
+	struct srpt_memory_cache_entry *e = NULL;
+	unsigned long object_size;
+
+	guard(mutex)(&srpt_mc_mutex);
+	xa_for_each(&srpt_memory_caches, object_size, e)
+		if (e->c == c)
+			break;
+	if (WARN_ON_ONCE(!e))
+		return;
+	if (!refcount_dec_and_test(&e->ref))
+		return;
+	WARN_ON_ONCE(xa_erase(&srpt_memory_caches, object_size) != e);
+	kmem_cache_destroy(e->c);
+	kfree(e);
+}
+
 /*
  * The only allowed channel state changes are those that change the channel
  * state into a state with a higher numerical value. Hence the new > prev test.
@@ -2119,13 +2178,13 @@ static void srpt_release_channel_work(struct work_struct *w)
			     ch->sport->sdev, ch->rq_size,
			     ch->rsp_buf_cache, DMA_TO_DEVICE);
 
-	kmem_cache_destroy(ch->rsp_buf_cache);
+	srpt_cache_put(ch->rsp_buf_cache);
 
 	srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_recv_ring,
			     sdev, ch->rq_size,
			     ch->req_buf_cache, DMA_FROM_DEVICE);
 
-	kmem_cache_destroy(ch->req_buf_cache);
+	srpt_cache_put(ch->req_buf_cache);
 
 	kref_put(&ch->kref, srpt_free_ch);
 }
@@ -2245,8 +2304,7 @@ static int srpt_cm_req_recv(struct srpt_device *const sdev,
 	INIT_LIST_HEAD(&ch->cmd_wait_list);
 
 	ch->max_rsp_size = ch->sport->port_attrib.srp_max_rsp_size;
-	ch->rsp_buf_cache = kmem_cache_create("srpt-rsp-buf", ch->max_rsp_size,
-					      512, 0, NULL);
+	ch->rsp_buf_cache = srpt_cache_get(ch->max_rsp_size);
 	if (!ch->rsp_buf_cache)
 		goto free_ch;
@@ -2280,8 +2338,7 @@ static int srpt_cm_req_recv(struct srpt_device *const sdev,
 		alignment_offset = round_up(imm_data_offset, 512) -
			imm_data_offset;
 		req_sz = alignment_offset + imm_data_offset + srp_max_req_size;
-		ch->req_buf_cache = kmem_cache_create("srpt-req-buf", req_sz,
-						      512, 0, NULL);
+		ch->req_buf_cache = srpt_cache_get(req_sz);
 		if (!ch->req_buf_cache)
			goto free_rsp_ring;
@@ -2478,7 +2535,7 @@ free_recv_ring:
			     ch->req_buf_cache, DMA_FROM_DEVICE);
 
 free_recv_cache:
-	kmem_cache_destroy(ch->req_buf_cache);
+	srpt_cache_put(ch->req_buf_cache);
 
 free_rsp_ring:
 	srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_ring,
@@ -2486,7 +2543,7 @@ free_rsp_ring:
			     ch->rsp_buf_cache, DMA_TO_DEVICE);
 
 free_rsp_cache:
-	kmem_cache_destroy(ch->rsp_buf_cache);
+	srpt_cache_put(ch->rsp_buf_cache);
 
 free_ch:
 	if (rdma_cm_id)
@@ -3055,7 +3112,7 @@ static void srpt_free_srq(struct srpt_device *sdev)
 	srpt_free_ioctx_ring((struct srpt_ioctx **)sdev->ioctx_ring, sdev,
			     sdev->srq_size, sdev->req_buf_cache,
			     DMA_FROM_DEVICE);
-	kmem_cache_destroy(sdev->req_buf_cache);
+	srpt_cache_put(sdev->req_buf_cache);
 	sdev->srq = NULL;
 }
@@ -3082,8 +3139,7 @@ static int srpt_alloc_srq(struct srpt_device *sdev)
 	pr_debug("create SRQ #wr= %d max_allow=%d dev= %s\n", sdev->srq_size,
		 sdev->device->attrs.max_srq_wr, dev_name(&device->dev));
 
-	sdev->req_buf_cache = kmem_cache_create("srpt-srq-req-buf",
-						srp_max_req_size, 0, 0, NULL);
+	sdev->req_buf_cache = srpt_cache_get(srp_max_req_size);
 	if (!sdev->req_buf_cache)
 		goto free_srq;
@@ -3105,7 +3161,7 @@ static int srpt_alloc_srq(struct srpt_device *sdev)
 	return 0;
 
 free_cache:
-	kmem_cache_destroy(sdev->req_buf_cache);
+	srpt_cache_put(sdev->req_buf_cache);
 
 free_srq:
 	ib_destroy_srq(srq);


@@ -484,7 +484,7 @@ greth_start_xmit_gbit(struct sk_buff *skb, struct net_device *dev)
 
 	if (unlikely(skb->len > MAX_FRAME_SIZE)) {
 		dev->stats.tx_errors++;
-		goto out;
+		goto len_error;
 	}
 
 	/* Save skb pointer. */
@@ -575,6 +575,7 @@ frag_map_error:
 map_error:
 	if (net_ratelimit())
 		dev_warn(greth->dev, "Could not create TX DMA mapping\n");
+len_error:
 	dev_kfree_skb(skb);
 out:
 	return err;
return err; return err;


@@ -2298,7 +2298,7 @@ static int nix_smq_flush(struct rvu *rvu, int blkaddr,
				   NIX_AF_TL3_TL2X_LINKX_CFG(tl2_tl3_link_schq, link));
 		if (!(cfg & BIT_ULL(12)))
 			continue;
-		bmap |= (1 << i);
+		bmap |= BIT_ULL(i);
 		cfg &= ~BIT_ULL(12);
 		rvu_write64(rvu, blkaddr,
			    NIX_AF_TL3_TL2X_LINKX_CFG(tl2_tl3_link_schq, link), cfg);
@@ -2319,7 +2319,7 @@ static int nix_smq_flush(struct rvu *rvu, int blkaddr,
 	/* Set NIX_AF_TL3_TL2_LINKX_CFG[ENA] for the TL3/TL2 queue */
 	for (i = 0; i < (rvu->hw->cgx_links + rvu->hw->lbk_links); i++) {
-		if (!(bmap & (1 << i)))
+		if (!(bmap & BIT_ULL(i)))
			continue;
 
 		cfg = rvu_read64(rvu, blkaddr,
				 NIX_AF_TL3_TL2X_LINKX_CFG(tl2_tl3_link_schq, link));
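
The underlying C pitfall, as a runnable demo: `1 << i` is int arithmetic, so the set bit is never wider than 32 bits and the shift is undefined for i >= 31, which is how link indices past bit 31 silently vanished from the u64 bitmap. The link index below is illustrative:

#include <stdio.h>
#include <stdint.h>

#define BIT_ULL(n) (1ULL << (n))	/* 64-bit one, as in the kernel */

int main(void)
{
	uint64_t bmap = 0;
	int link = 40;	/* past bit 31; (1 << link) would be UB here */

	bmap |= BIT_ULL(link);
	printf("link %d recorded: %s\n", link,
	       (bmap & BIT_ULL(link)) ? "yes" : "no");
	return 0;
}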


@@ -127,10 +127,12 @@ static int mgbe_uphy_lane_bringup_serdes_up(struct net_device *ndev, void *mgbe_
 	value &= ~XPCS_WRAP_UPHY_RX_CONTROL_AUX_RX_IDDQ;
 	writel(value, mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
 
+	usleep_range(10, 20);	/* 50ns min delay needed as per HW design */
 	value = readl(mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
 	value &= ~XPCS_WRAP_UPHY_RX_CONTROL_RX_SLEEP;
 	writel(value, mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
 
+	usleep_range(10, 20);	/* 500ns min delay needed as per HW design */
 	value = readl(mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
 	value |= XPCS_WRAP_UPHY_RX_CONTROL_RX_CAL_EN;
 	writel(value, mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
@@ -143,22 +145,30 @@ static int mgbe_uphy_lane_bringup_serdes_up(struct net_device *ndev, void *mgbe_
 		return err;
 	}
 
+	usleep_range(10, 20);	/* 50ns min delay needed as per HW design */
 	value = readl(mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
 	value |= XPCS_WRAP_UPHY_RX_CONTROL_RX_DATA_EN;
 	writel(value, mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
 
+	value = readl(mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
+	value &= ~XPCS_WRAP_UPHY_RX_CONTROL_RX_PCS_PHY_RDY;
+	writel(value, mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
+
+	usleep_range(10, 20);	/* 50ns min delay needed as per HW design */
 	value = readl(mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
 	value |= XPCS_WRAP_UPHY_RX_CONTROL_RX_CDR_RESET;
 	writel(value, mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
 
-	value = readl(mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
-	value &= ~XPCS_WRAP_UPHY_RX_CONTROL_RX_CDR_RESET;
-	writel(value, mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
-
+	usleep_range(10, 20);	/* 50ns min delay needed as per HW design */
 	value = readl(mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
 	value |= XPCS_WRAP_UPHY_RX_CONTROL_RX_PCS_PHY_RDY;
 	writel(value, mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
 
+	msleep(30); /* 30ms delay needed as per HW design */
+	value = readl(mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
+	value &= ~XPCS_WRAP_UPHY_RX_CONTROL_RX_CDR_RESET;
+	writel(value, mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
+
 	err = readl_poll_timeout(mgbe->xpcs + XPCS_WRAP_IRQ_STATUS, value,
				 value & XPCS_WRAP_IRQ_STATUS_PCS_LINK_STS,
				 500, 500 * 2000);


@@ -845,6 +845,7 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 		if (net_ratelimit())
			netdev_err(ndev, "TX DMA mapping error\n");
 		ndev->stats.tx_dropped++;
+		dev_kfree_skb_any(skb);
 		return NETDEV_TX_OK;
 	}
 	desc_set_phys_addr(lp, phys, cur_p);
@@ -865,6 +866,7 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
			ndev->stats.tx_dropped++;
			axienet_free_tx_chain(lp, orig_tail_ptr, ii + 1,
					      true, NULL, 0);
+			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}
		desc_set_phys_addr(lp, phys, cur_p);
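
Both additions enforce the ndo_start_xmit() ownership rule: returning NETDEV_TX_OK tells the stack the driver consumed the skb, so any error path taking that return must free the skb itself or it leaks. A hedged sketch of the shape of such a path; map_descriptors() is an invented stand-in for the driver's real mapping step:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static netdev_tx_t example_start_xmit(struct sk_buff *skb,
				      struct net_device *ndev)
{
	if (map_descriptors(skb) < 0) {		/* hypothetical helper */
		ndev->stats.tx_dropped++;
		dev_kfree_skb_any(skb);		/* we own it; free it */
		return NETDEV_TX_OK;		/* consumed, not TX_BUSY */
	}
	/* ... hand descriptors to the DMA engine ... */
	return NETDEV_TX_OK;
}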


@@ -151,19 +151,6 @@ static struct macsec_rx_sa *macsec_rxsa_get(struct macsec_rx_sa __rcu *ptr)
 	return sa;
 }
 
-static struct macsec_rx_sa *macsec_active_rxsa_get(struct macsec_rx_sc *rx_sc)
-{
-	struct macsec_rx_sa *sa = NULL;
-	int an;
-
-	for (an = 0; an < MACSEC_NUM_AN; an++) {
-		sa = macsec_rxsa_get(rx_sc->sa[an]);
-		if (sa)
-			break;
-	}
-	return sa;
-}
-
 static void free_rx_sc_rcu(struct rcu_head *head)
 {
 	struct macsec_rx_sc *rx_sc = container_of(head, struct macsec_rx_sc, rcu_head);
@@ -1205,15 +1192,12 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
 		/* If validateFrames is Strict or the C bit in the
		 * SecTAG is set, discard
		 */
-		struct macsec_rx_sa *active_rx_sa = macsec_active_rxsa_get(rx_sc);
 		if (hdr->tci_an & MACSEC_TCI_C ||
		    secy->validate_frames == MACSEC_VALIDATE_STRICT) {
			u64_stats_update_begin(&rxsc_stats->syncp);
			rxsc_stats->stats.InPktsNotUsingSA++;
			u64_stats_update_end(&rxsc_stats->syncp);
			DEV_STATS_INC(secy->netdev, rx_errors);
-			if (active_rx_sa)
-				this_cpu_inc(active_rx_sa->stats->InPktsNotUsingSA);
			goto drop_nosa;
		}
 
@@ -1223,8 +1207,6 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
			u64_stats_update_begin(&rxsc_stats->syncp);
			rxsc_stats->stats.InPktsUnusedSA++;
			u64_stats_update_end(&rxsc_stats->syncp);
-			if (active_rx_sa)
-				this_cpu_inc(active_rx_sa->stats->InPktsUnusedSA);
			goto deliver;
		}


@@ -836,7 +836,8 @@ static void nsim_dev_trap_report_work(struct work_struct *work)
 	nsim_dev = nsim_trap_data->nsim_dev;
 
 	if (!devl_trylock(priv_to_devlink(nsim_dev))) {
-		schedule_delayed_work(&nsim_dev->trap_data->trap_report_dw, 1);
+		queue_delayed_work(system_unbound_wq,
+				   &nsim_dev->trap_data->trap_report_dw, 1);
 		return;
 	}
@@ -848,11 +849,12 @@ static void nsim_dev_trap_report_work(struct work_struct *work)
			continue;
 
 		nsim_dev_trap_report(nsim_dev_port);
+		cond_resched();
 	}
 	devl_unlock(priv_to_devlink(nsim_dev));
-
-	schedule_delayed_work(&nsim_dev->trap_data->trap_report_dw,
-			      msecs_to_jiffies(NSIM_TRAP_REPORT_INTERVAL_MS));
+	queue_delayed_work(system_unbound_wq,
+			   &nsim_dev->trap_data->trap_report_dw,
+			   msecs_to_jiffies(NSIM_TRAP_REPORT_INTERVAL_MS));
 }
 
 static int nsim_dev_traps_init(struct devlink *devlink)
@@ -907,8 +909,9 @@ static int nsim_dev_traps_init(struct devlink *devlink)
 	INIT_DELAYED_WORK(&nsim_dev->trap_data->trap_report_dw,
			  nsim_dev_trap_report_work);
-	schedule_delayed_work(&nsim_dev->trap_data->trap_report_dw,
-			      msecs_to_jiffies(NSIM_TRAP_REPORT_INTERVAL_MS));
+	queue_delayed_work(system_unbound_wq,
+			   &nsim_dev->trap_data->trap_report_dw,
+			   msecs_to_jiffies(NSIM_TRAP_REPORT_INTERVAL_MS));
 
 	return 0;


@@ -1874,6 +1874,7 @@ out1:
	 * may trigger an error resubmitting itself and, worse,
	 * schedule a timer. So we kill it all just in case.
	 */
+	usbnet_mark_going_away(dev);
 	cancel_work_sync(&dev->kevent);
 	del_timer_sync(&dev->delay);
 	free_percpu(net->tstats);


@@ -14,10 +14,14 @@ init_task_work(struct callback_head *twork, task_work_func_t func)
 }
 
 enum task_work_notify_mode {
-	TWA_NONE,
+	TWA_NONE = 0,
 	TWA_RESUME,
 	TWA_SIGNAL,
 	TWA_SIGNAL_NO_IPI,
+	TWA_NMI_CURRENT,
+
+	TWA_FLAGS = 0xff00,
+	TWAF_NO_ALLOC = 0x0100,
 };
 
 static inline bool task_work_pending(struct task_struct *task)
static inline bool task_work_pending(struct task_struct *task) static inline bool task_work_pending(struct task_struct *task)


@@ -2799,10 +2799,16 @@ static struct btf *__find_kfunc_desc_btf(struct bpf_verifier_env *env,
 		b->module = mod;
 		b->offset = offset;
 
+		/* sort() reorders entries by value, so b may no longer point
+		 * to the right entry after this
+		 */
 		sort(tab->descs, tab->nr_descs, sizeof(tab->descs[0]),
		     kfunc_btf_cmp_by_off, NULL);
+	} else {
+		btf = b->btf;
 	}
-	return b->btf;
+
+	return btf;
 }
 
 void bpf_free_kfunc_btf_tab(struct bpf_kfunc_btf_tab *tab)
@@ -6137,10 +6143,10 @@ static void coerce_reg_to_size_sx(struct bpf_reg_state *reg, int size)
 	/* both of s64_max/s64_min positive or negative */
 	if ((s64_max >= 0) == (s64_min >= 0)) {
-		reg->smin_value = reg->s32_min_value = s64_min;
-		reg->smax_value = reg->s32_max_value = s64_max;
-		reg->umin_value = reg->u32_min_value = s64_min;
-		reg->umax_value = reg->u32_max_value = s64_max;
+		reg->s32_min_value = reg->smin_value = s64_min;
+		reg->s32_max_value = reg->smax_value = s64_max;
+		reg->u32_min_value = reg->umin_value = s64_min;
+		reg->u32_max_value = reg->umax_value = s64_max;
 		reg->var_off = tnum_range(s64_min, s64_max);
 		return;
 	}
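
The second hunk is the truncation fix named in the merge subject. In a chained assignment `a = b = x`, `a` receives `b`'s value after conversion to `b`'s type, so writing the 32-bit field first clobbered the 64-bit bounds. A standalone demo with a stand-in struct (the field names mirror bpf_reg_state, but this is not the kernel's definition):

#include <stdio.h>
#include <stdint.h>

struct reg {
	int64_t smin_value;	/* 64-bit bound */
	int32_t s32_min_value;	/* 32-bit bound */
};

int main(void)
{
	struct reg r;
	int64_t s64_min = 0x100000000LL;	/* does not fit in 32 bits */

	/* Buggy order: the inner assignment truncates to 32 bits, and
	 * the truncated value is what lands in the 64-bit field. */
	r.smin_value = r.s32_min_value = s64_min;
	printf("buggy: smin=%lld\n", (long long)r.smin_value);	/* 0 */

	/* Fixed order: the 64-bit field gets the full value first. */
	r.s32_min_value = r.smin_value = s64_min;
	printf("fixed: smin=%lld\n", (long long)r.smin_value);	/* 4294967296 */
	return 0;
}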


@@ -12304,7 +12304,9 @@ void task_tick_mm_cid(struct rq *rq, struct task_struct *curr)
 		return;
 	if (time_before(now, READ_ONCE(curr->mm->mm_cid_next_scan)))
 		return;
-	task_work_add(curr, work, TWA_RESUME);
+
+	/* No page allocation under rq lock */
+	task_work_add(curr, work, TWA_RESUME | TWAF_NO_ALLOC);
 }
 
 void sched_mm_cid_exit_signals(struct task_struct *t)
void sched_mm_cid_exit_signals(struct task_struct *t) void sched_mm_cid_exit_signals(struct task_struct *t)


@@ -1,10 +1,18 @@
 // SPDX-License-Identifier: GPL-2.0
+#include <linux/irq_work.h>
 #include <linux/spinlock.h>
 #include <linux/task_work.h>
 #include <linux/resume_user_mode.h>
 
 static struct callback_head work_exited; /* all we need is ->next == NULL */
 
+static void task_work_set_notify_irq(struct irq_work *entry)
+{
+	test_and_set_tsk_thread_flag(current, TIF_NOTIFY_RESUME);
+}
+static DEFINE_PER_CPU(struct irq_work, irq_work_NMI_resume) =
+	IRQ_WORK_INIT_HARD(task_work_set_notify_irq);
+
 /**
  * task_work_add - ask the @task to execute @work->func()
  * @task: the task which should run the callback
@@ -12,7 +20,7 @@ static struct callback_head work_exited; /* all we need is ->next == NULL */
  * @notify: how to notify the targeted task
  *
  * Queue @work for task_work_run() below and notify the @task if @notify
- * is @TWA_RESUME, @TWA_SIGNAL, or @TWA_SIGNAL_NO_IPI.
+ * is @TWA_RESUME, @TWA_SIGNAL, @TWA_SIGNAL_NO_IPI or @TWA_NMI_CURRENT.
  *
  * @TWA_SIGNAL works like signals, in that the it will interrupt the targeted
  * task and run the task_work, regardless of whether the task is currently
@@ -24,6 +32,8 @@ static struct callback_head work_exited; /* all we need is ->next == NULL */
  * kernel anyway.
  * @TWA_RESUME work is run only when the task exits the kernel and returns to
  * user mode, or before entering guest mode.
+ * @TWA_NMI_CURRENT works like @TWA_RESUME, except it can only be used for the
+ * current @task and if the current context is NMI.
  *
  * Fails if the @task is exiting/exited and thus it can't process this @work.
 * Otherwise @work->func() will be called when the @task goes through one of
@@ -43,9 +53,25 @@ int task_work_add(struct task_struct *task, struct callback_head *work,
		  enum task_work_notify_mode notify)
 {
 	struct callback_head *head;
+	int flags = notify & TWA_FLAGS;
 
-	/* record the work call stack in order to print it in KASAN reports */
-	kasan_record_aux_stack(work);
+	notify &= ~TWA_FLAGS;
+	if (notify == TWA_NMI_CURRENT) {
+		if (WARN_ON_ONCE(task != current))
+			return -EINVAL;
+	} else {
+		/*
+		 * Record the work call stack in order to print it in KASAN
+		 * reports.
+		 *
+		 * Note that stack allocation can fail if TWAF_NO_ALLOC flag
+		 * is set and new page is needed to expand the stack buffer.
+		 */
+		if (flags & TWAF_NO_ALLOC)
+			kasan_record_aux_stack_noalloc(work);
+		else
+			kasan_record_aux_stack(work);
+	}
 
 	head = READ_ONCE(task->task_works);
 	do {
@@ -66,6 +92,9 @@ int task_work_add(struct task_struct *task, struct callback_head *work,
 	case TWA_SIGNAL_NO_IPI:
 		__set_notify_signal(task);
 		break;
+	case TWA_NMI_CURRENT:
+		irq_work_queue(this_cpu_ptr(&irq_work_NMI_resume));
+		break;
 	default:
 		WARN_ON_ONCE(1);
 		break;
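
A hedged usage sketch of the new mode; the handler and work names below are invented, and only task_work_add() with its TWA_NMI_CURRENT constraints (current task only, NMI context only) comes from the patch:

#include <linux/task_work.h>
#include <linux/sched.h>

static void my_nmi_followup(struct callback_head *head)
{
	/* Runs later in process context, once current heads back to
	 * user mode (TIF_NOTIFY_RESUME is set via the irq_work). */
}

static struct callback_head nmi_followup_work = {
	.func = my_nmi_followup,
};

static void my_nmi_handler(void)
{
	task_work_add(current, &nmi_followup_work, TWA_NMI_CURRENT);
}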


@@ -283,17 +283,19 @@ static struct in_device *inetdev_init(struct net_device *dev)
 	/* Account for reference dev->ip_ptr (below) */
 	refcount_set(&in_dev->refcnt, 1);
 
-	err = devinet_sysctl_register(in_dev);
-	if (err) {
-		in_dev->dead = 1;
-		neigh_parms_release(&arp_tbl, in_dev->arp_parms);
-		in_dev_put(in_dev);
-		in_dev = NULL;
-		goto out;
+	if (dev != blackhole_netdev) {
+		err = devinet_sysctl_register(in_dev);
+		if (err) {
+			in_dev->dead = 1;
+			neigh_parms_release(&arp_tbl, in_dev->arp_parms);
+			in_dev_put(in_dev);
+			in_dev = NULL;
+			goto out;
+		}
+		ip_mc_init_dev(in_dev);
+		if (dev->flags & IFF_UP)
+			ip_mc_up(in_dev);
 	}
-	ip_mc_init_dev(in_dev);
-	if (dev->flags & IFF_UP)
-		ip_mc_up(in_dev);
 
 	/* we can receive as soon as ip_ptr is set -- do this last */
 	rcu_assign_pointer(dev->ip_ptr, in_dev);
@@ -332,6 +334,19 @@ static void inetdev_destroy(struct in_device *in_dev)
 	in_dev_put(in_dev);
 }
 
+static int __init inet_blackhole_dev_init(void)
+{
+	int err = 0;
+
+	rtnl_lock();
+	if (!inetdev_init(blackhole_netdev))
+		err = -ENOMEM;
+	rtnl_unlock();
+
+	return err;
+}
+late_initcall(inet_blackhole_dev_init);
+
 int inet_addr_onlink(struct in_device *in_dev, __be32 a, __be32 b)
 {
 	const struct in_ifaddr *ifa;


@@ -753,7 +753,7 @@ static int smc_pnet_add_pnetid(struct net *net, u8 *pnetid)
 
 	write_lock(&sn->pnetids_ndev.lock);
 	list_for_each_entry(pi, &sn->pnetids_ndev.list, list) {
-		if (smc_pnet_match(pnetid, pe->pnetid)) {
+		if (smc_pnet_match(pnetid, pi->pnetid)) {
			refcount_inc(&pi->refcnt);
			kfree(pe);
			goto unlock;


@@ -648,8 +648,10 @@ void smc_wr_free_link(struct smc_link *lnk)
 	smc_wr_tx_wait_no_pending_sends(lnk);
 	percpu_ref_kill(&lnk->wr_reg_refs);
 	wait_for_completion(&lnk->reg_ref_comp);
+	percpu_ref_exit(&lnk->wr_reg_refs);
 	percpu_ref_kill(&lnk->wr_tx_refs);
 	wait_for_completion(&lnk->tx_ref_comp);
+	percpu_ref_exit(&lnk->wr_tx_refs);
 
 	if (lnk->wr_rx_dma_addr) {
 		ib_dma_unmap_single(ibdev, lnk->wr_rx_dma_addr,
@@ -912,11 +914,13 @@ int smc_wr_create_link(struct smc_link *lnk)
 	init_waitqueue_head(&lnk->wr_reg_wait);
 	rc = percpu_ref_init(&lnk->wr_reg_refs, smcr_wr_reg_refs_free, 0, GFP_KERNEL);
 	if (rc)
-		goto dma_unmap;
+		goto cancel_ref;
 	init_completion(&lnk->reg_ref_comp);
 	init_waitqueue_head(&lnk->wr_rx_empty_wait);
 	return rc;
 
+cancel_ref:
+	percpu_ref_exit(&lnk->wr_tx_refs);
 dma_unmap:
 	if (lnk->wr_rx_v2_dma_addr) {
 		ib_dma_unmap_single(ibdev, lnk->wr_rx_v2_dma_addr,
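
These additions restore the percpu_ref lifecycle pairing: percpu_ref_init() allocates percpu counters that only percpu_ref_exit() releases, while percpu_ref_kill() merely drops the initial reference and switches the ref to atomic mode, so init without a matching exit leaks. A minimal sketch of the pairing with an illustrative release callback:

#include <linux/percpu-refcount.h>
#include <linux/completion.h>

static struct percpu_ref ref;
static DECLARE_COMPLETION(ref_done);

static void ref_release(struct percpu_ref *r)
{
	complete(&ref_done);	/* last reference gone */
}

static int setup(void)
{
	return percpu_ref_init(&ref, ref_release, 0, GFP_KERNEL);
}

static void teardown(void)
{
	percpu_ref_kill(&ref);		/* drop the initial reference */
	wait_for_completion(&ref_done);	/* release callback has run */
	percpu_ref_exit(&ref);		/* free the percpu counters */
}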


@@ -1411,8 +1411,9 @@ void dolphin_fixups(struct hda_codec *codec, const struct hda_fixup *fix, int ac
 		kctrl = snd_hda_gen_add_kctl(&spec->gen, "Line Out Playback Volume",
					     &cs42l42_dac_volume_mixer);
 		/* Update Line Out kcontrol template */
-		kctrl->private_value = HDA_COMPOSE_AMP_VAL_OFS(DOLPHIN_HP_PIN_NID, 3, CS8409_CODEC1,
-				       HDA_OUTPUT, CS42L42_VOL_DAC) | HDA_AMP_VAL_MIN_MUTE;
+		if (kctrl)
+			kctrl->private_value = HDA_COMPOSE_AMP_VAL_OFS(DOLPHIN_HP_PIN_NID, 3, CS8409_CODEC1,
+					       HDA_OUTPUT, CS42L42_VOL_DAC) | HDA_AMP_VAL_MIN_MUTE;
 		cs8409_enable_ur(codec, 0);
 		snd_hda_codec_set_name(codec, "CS8409/CS42L42");
 		break;


@@ -200,7 +200,7 @@ $(OUTPUT)/%:%.c
 ifeq ($(SRCARCH),x86)
 LLD := lld
 else
-LLD := ld
+LLD := $(shell command -v $(LD))
 endif
 
 # Filter out -static for liburandom_read.so and its dependent targets so that static builds