Merge branch 'v5.15/standard/base' into v5.15/standard/preempt-rt/cn-sdkv5.4/octeon

Author: Bruce Ashfield
Date:   2025-10-06 13:13:31 -04:00
Commit: e8e2a8e6ee

133 changed files with 1471 additions and 538 deletions


@ -41,7 +41,7 @@ properties:
- const: dma_intr2
clocks:
minItems: 1
maxItems: 1
clock-names:
const: sw_baud


@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 5
PATCHLEVEL = 15
SUBLEVEL = 193
SUBLEVEL = 194
EXTRAVERSION =
NAME = Trick or Treat


@ -161,7 +161,7 @@
cpu-thermal {
polling-delay-passive = <250>;
polling-delay = <2000>;
thermal-sensors = <&tmu 0>;
thermal-sensors = <&tmu 1>;
trips {
cpu_alert0: trip0 {
temperature = <85000>;
@ -191,7 +191,7 @@
soc-thermal {
polling-delay-passive = <250>;
polling-delay = <2000>;
thermal-sensors = <&tmu 1>;
thermal-sensors = <&tmu 0>;
trips {
soc_alert0: trip0 {
temperature = <85000>;


@ -1225,10 +1225,12 @@ static int virtio_uml_probe(struct platform_device *pdev)
device_set_wakeup_capable(&vu_dev->vdev.dev, true);
rc = register_virtio_device(&vu_dev->vdev);
if (rc)
if (rc) {
put_device(&vu_dev->vdev.dev);
return rc;
}
vu_dev->registered = 1;
return rc;
return 0;
error_init:
os_close_file(vu_dev->sock);


@ -544,6 +544,17 @@ void kvm_set_cpu_caps(void)
0 /* SME */ | F(SEV) | 0 /* VM_PAGE_FLUSH */ | F(SEV_ES) |
F(SME_COHERENT));
kvm_cpu_cap_mask(CPUID_8000_0021_EAX,
BIT(0) /* NO_NESTED_DATA_BP */ |
BIT(2) /* LFENCE Always serializing */ | 0 /* SmmPgCfgLock */ |
BIT(5) /* The memory form of VERW mitigates TSA */ |
BIT(6) /* NULL_SEL_CLR_BASE */ | 0 /* PrefetchCtlMsr */
);
if (cpu_feature_enabled(X86_FEATURE_LFENCE_RDTSC))
kvm_cpu_caps[CPUID_8000_0021_EAX] |= BIT(2) /* LFENCE Always serializing */;
if (!static_cpu_has_bug(X86_BUG_NULL_SEG))
kvm_cpu_caps[CPUID_8000_0021_EAX] |= BIT(6) /* NULL_SEL_CLR_BASE */;
kvm_cpu_cap_mask(CPUID_C000_0001_EDX,
F(XSTORE) | F(XSTORE_EN) | F(XCRYPT) | F(XCRYPT_EN) |
F(ACE2) | F(ACE2_EN) | F(PHE) | F(PHE_EN) |
@ -553,12 +564,15 @@ void kvm_set_cpu_caps(void)
if (cpu_feature_enabled(X86_FEATURE_SRSO_NO))
kvm_cpu_cap_set(X86_FEATURE_SRSO_NO);
kvm_cpu_cap_mask(CPUID_8000_0021_EAX, F(VERW_CLEAR));
kvm_cpu_cap_check_and_set(X86_FEATURE_VERW_CLEAR);
kvm_cpu_cap_init_kvm_defined(CPUID_8000_0021_ECX,
F(TSA_SQ_NO) | F(TSA_L1_NO)
);
kvm_cpu_cap_check_and_set(X86_FEATURE_TSA_SQ_NO);
kvm_cpu_cap_check_and_set(X86_FEATURE_TSA_L1_NO);
/*
* Hide RDTSCP and RDPID if either feature is reported as supported but
* probing MSR_TSC_AUX failed. This is purely a sanity check and
@ -1005,18 +1019,9 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
break;
case 0x80000021:
entry->ebx = entry->ecx = entry->edx = 0;
/*
* Pass down these bits:
* EAX 0 NNDBP, Processor ignores nested data breakpoints
* EAX 2 LAS, LFENCE always serializing
* EAX 6 NSCB, Null selector clear base
*
* Other defined bits are for MSRs that KVM does not expose:
* EAX 3 SPCL, SMM page configuration lock
* EAX 13 PCMSR, Prefetch control MSR
*/
entry->eax &= BIT(0) | BIT(2) | BIT(6);
entry->ebx = entry->edx = 0;
cpuid_entry_override(entry, CPUID_8000_0021_EAX);
cpuid_entry_override(entry, CPUID_8000_0021_ECX);
break;
/*Add support for Centaur's CPUID instruction*/
case 0xC0000000:


@ -3666,8 +3666,7 @@ static inline void sync_lapic_to_cr8(struct kvm_vcpu *vcpu)
struct vcpu_svm *svm = to_svm(vcpu);
u64 cr8;
if (nested_svm_virtualize_tpr(vcpu) ||
kvm_vcpu_apicv_active(vcpu))
if (nested_svm_virtualize_tpr(vcpu))
return;
cr8 = kvm_get_cr8(vcpu);


@ -862,6 +862,12 @@ int af_alg_sendmsg(struct socket *sock, struct msghdr *msg, size_t size,
}
lock_sock(sk);
if (ctx->write) {
release_sock(sk);
return -EBUSY;
}
ctx->write = true;
if (ctx->init && !ctx->more) {
if (ctx->used) {
err = -EINVAL;
@ -969,6 +975,7 @@ int af_alg_sendmsg(struct socket *sock, struct msghdr *msg, size_t size,
unlock:
af_alg_data_wakeup(sk);
ctx->write = false;
release_sock(sk);
return copied ?: err;


@ -2853,6 +2853,15 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
goto err_null_driver;
}
/*
* Mark support for the scheduler's frequency invariance engine for
* drivers that implement target(), target_index() or fast_switch().
*/
if (!cpufreq_driver->setpolicy) {
static_branch_enable_cpuslocked(&cpufreq_freq_invariance);
pr_debug("cpufreq: supports frequency invariance\n");
}
ret = subsys_interface_register(&cpufreq_interface);
if (ret)
goto err_boost_unreg;
@ -2874,21 +2883,14 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
hp_online = ret;
ret = 0;
/*
* Mark support for the scheduler's frequency invariance engine for
* drivers that implement target(), target_index() or fast_switch().
*/
if (!cpufreq_driver->setpolicy) {
static_branch_enable_cpuslocked(&cpufreq_freq_invariance);
pr_debug("supports frequency invariance");
}
pr_debug("driver %s up and running\n", driver_data->name);
goto out;
err_if_unreg:
subsys_interface_unregister(&cpufreq_interface);
err_boost_unreg:
if (!cpufreq_driver->setpolicy)
static_branch_disable_cpuslocked(&cpufreq_freq_invariance);
remove_boost_sysfs_file();
err_null_driver:
write_lock_irqsave(&cpufreq_driver_lock, flags);


@ -1253,13 +1253,17 @@ static int bam_dma_probe(struct platform_device *pdev)
if (bdev->controlled_remotely) {
ret = of_property_read_u32(pdev->dev.of_node, "num-channels",
&bdev->num_channels);
if (ret)
if (ret) {
dev_err(bdev->dev, "num-channels unspecified in dt\n");
return ret;
}
ret = of_property_read_u32(pdev->dev.of_node, "qcom,num-ees",
&bdev->num_ees);
if (ret)
if (ret) {
dev_err(bdev->dev, "num-ees unspecified in dt\n");
return ret;
}
}
if (bdev->controlled_remotely)


@ -2121,8 +2121,8 @@ static int edma_setup_from_hw(struct device *dev, struct edma_soc_info *pdata,
* priority. So Q0 is the highest priority queue and the last queue has
* the lowest priority.
*/
queue_priority_map = devm_kcalloc(dev, ecc->num_tc + 1, sizeof(s8),
GFP_KERNEL);
queue_priority_map = devm_kcalloc(dev, ecc->num_tc + 1,
sizeof(*queue_priority_map), GFP_KERNEL);
if (!queue_priority_map)
return -ENOMEM;


@ -127,7 +127,6 @@ static ssize_t altr_sdr_mc_err_inject_write(struct file *file,
ptemp = dma_alloc_coherent(mci->pdev, 16, &dma_handle, GFP_KERNEL);
if (!ptemp) {
dma_free_coherent(mci->pdev, 16, ptemp, dma_handle);
edac_printk(KERN_ERR, EDAC_MC,
"Inject: Buffer Allocation error\n");
return -ENOMEM;


@ -302,8 +302,6 @@ void amdgpu_ring_fini(struct amdgpu_ring *ring)
dma_fence_put(ring->vmid_wait);
ring->vmid_wait = NULL;
ring->me = 0;
ring->adev->rings[ring->idx] = NULL;
}
/**


@ -1822,7 +1822,7 @@ static int anx7625_i2c_probe(struct i2c_client *client,
ret = devm_request_threaded_irq(dev, platform->pdata.intp_irq,
NULL, anx7625_intr_hpd_isr,
IRQF_TRIGGER_FALLING |
IRQF_ONESHOT,
IRQF_ONESHOT | IRQF_NO_AUTOEN,
"anx7625-intp", platform);
if (ret) {
DRM_DEV_ERROR(dev, "fail to request irq\n");
@ -1844,8 +1844,10 @@ static int anx7625_i2c_probe(struct i2c_client *client,
}
/* Add work function */
if (platform->pdata.intp_irq)
if (platform->pdata.intp_irq) {
enable_irq(platform->pdata.intp_irq);
queue_work(platform->workqueue, &platform->work);
}
platform->bridge.funcs = &anx7625_bridge_funcs;
platform->bridge.of_node = client->dev.of_node;


@ -2040,8 +2040,10 @@ static void cdns_mhdp_atomic_enable(struct drm_bridge *bridge,
mhdp_state = to_cdns_mhdp_bridge_state(new_state);
mhdp_state->current_mode = drm_mode_duplicate(bridge->dev, mode);
if (!mhdp_state->current_mode)
return;
if (!mhdp_state->current_mode) {
ret = -EINVAL;
goto out;
}
drm_mode_set_name(mhdp_state->current_mode);


@ -724,8 +724,8 @@ void oaktrail_hdmi_teardown(struct drm_device *dev)
if (hdmi_dev) {
pdev = hdmi_dev->dev;
pci_set_drvdata(pdev, NULL);
oaktrail_hdmi_i2c_exit(pdev);
pci_set_drvdata(pdev, NULL);
iounmap(hdmi_dev->regs);
kfree(hdmi_dev);
pci_dev_put(pdev);


@ -31,8 +31,9 @@ static u32 scale(u32 source_val,
{
u64 target_val;
WARN_ON(source_min > source_max);
WARN_ON(target_min > target_max);
if (WARN_ON(source_min >= source_max) ||
WARN_ON(target_min > target_max))
return target_min;
/* defensive */
source_val = clamp(source_val, source_min, source_max);


@ -5293,7 +5293,7 @@ static void icl_mbus_init(struct drm_i915_private *dev_priv)
if (DISPLAY_VER(dev_priv) == 12)
abox_regs |= BIT(0);
for_each_set_bit(i, &abox_regs, sizeof(abox_regs))
for_each_set_bit(i, &abox_regs, BITS_PER_TYPE(abox_regs))
intel_de_rmw(dev_priv, MBUS_ABOX_CTL(i), mask, val);
}
@ -5754,11 +5754,11 @@ static void tgl_bw_buddy_init(struct drm_i915_private *dev_priv)
if (table[config].page_mask == 0) {
drm_dbg(&dev_priv->drm,
"Unknown memory configuration; disabling address buddy logic.\n");
for_each_set_bit(i, &abox_mask, sizeof(abox_mask))
for_each_set_bit(i, &abox_mask, BITS_PER_TYPE(abox_mask))
intel_de_write(dev_priv, BW_BUDDY_CTL(i),
BW_BUDDY_DISABLE);
} else {
for_each_set_bit(i, &abox_mask, sizeof(abox_mask)) {
for_each_set_bit(i, &abox_mask, BITS_PER_TYPE(abox_mask)) {
intel_de_write(dev_priv, BW_BUDDY_PAGE_MASK(i),
table[config].page_mask);


@ -191,6 +191,7 @@ static u16 get_legacy_obj_type(u16 opcode)
{
switch (opcode) {
case MLX5_CMD_OP_CREATE_RQ:
case MLX5_CMD_OP_CREATE_RMP:
return MLX5_EVENT_QUEUE_TYPE_RQ;
case MLX5_CMD_OP_CREATE_QP:
return MLX5_EVENT_QUEUE_TYPE_QP;


@ -1147,6 +1147,20 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = {
.driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
},
{
.matches = {
DMI_MATCH(DMI_BOARD_NAME, "XxHP4NAx"),
},
.driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
},
{
.matches = {
DMI_MATCH(DMI_BOARD_NAME, "XxKK4NAx_XxSP4NAx"),
},
.driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
},
/*
* A lot of modern Clevo barebones have touchpad and/or keyboard issues
* after suspend fixable with nomux + reset + noloop + nopnp. Luckily,


@ -20,7 +20,9 @@
#include <media/v4l2-subdev.h>
#define IMX214_DEFAULT_CLK_FREQ 24000000
#define IMX214_DEFAULT_LINK_FREQ 480000000
#define IMX214_DEFAULT_LINK_FREQ 600000000
/* Keep wrong link frequency for backward compatibility */
#define IMX214_DEFAULT_LINK_FREQ_LEGACY 480000000
#define IMX214_DEFAULT_PIXEL_RATE ((IMX214_DEFAULT_LINK_FREQ * 8LL) / 10)
#define IMX214_FPS 30
#define IMX214_MBUS_CODE MEDIA_BUS_FMT_SRGGB10_1X10
@ -892,17 +894,26 @@ static int imx214_parse_fwnode(struct device *dev)
goto done;
}
for (i = 0; i < bus_cfg.nr_of_link_frequencies; i++)
if (bus_cfg.nr_of_link_frequencies != 1)
dev_warn(dev, "Only one link-frequency supported, please review your DT. Continuing anyway\n");
for (i = 0; i < bus_cfg.nr_of_link_frequencies; i++) {
if (bus_cfg.link_frequencies[i] == IMX214_DEFAULT_LINK_FREQ)
break;
if (i == bus_cfg.nr_of_link_frequencies) {
dev_err(dev, "link-frequencies %d not supported, Please review your DT\n",
IMX214_DEFAULT_LINK_FREQ);
ret = -EINVAL;
goto done;
if (bus_cfg.link_frequencies[i] ==
IMX214_DEFAULT_LINK_FREQ_LEGACY) {
dev_warn(dev,
"link-frequencies %d not supported, please review your DT. Continuing anyway\n",
IMX214_DEFAULT_LINK_FREQ);
break;
}
}
if (i == bus_cfg.nr_of_link_frequencies)
ret = dev_err_probe(dev, -EINVAL,
"link-frequencies %d not supported, please review your DT\n",
IMX214_DEFAULT_LINK_FREQ);
done:
v4l2_fwnode_endpoint_free(&bus_cfg);
fwnode_handle_put(endpoint);


@ -513,7 +513,11 @@ static int h264_enc_init(struct mtk_vcodec_ctx *ctx)
inst->ctx = ctx;
inst->vpu_inst.ctx = ctx;
inst->vpu_inst.id = is_ext ? SCP_IPI_VENC_H264 : IPI_VENC_H264;
if (is_ext)
inst->vpu_inst.id = SCP_IPI_VENC_H264;
else
inst->vpu_inst.id = IPI_VENC_H264;
inst->hw_base = mtk_vcodec_get_reg_addr(inst->ctx, VENC_SYS);
mtk_vcodec_debug_enter(inst);


@ -292,7 +292,7 @@ static u32 mvsd_finish_data(struct mvsd_host *host, struct mmc_data *data,
host->pio_ptr = NULL;
host->pio_size = 0;
} else {
dma_unmap_sg(mmc_dev(host->mmc), data->sg, host->sg_frags,
dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
mmc_get_dma_dir(data));
}


@ -1378,13 +1378,23 @@ static int atmel_smc_nand_prepare_smcconf(struct atmel_nand *nand,
return ret;
/*
* The write cycle timing is directly matching tWC, but is also
* Read setup timing depends on the operation done on the NAND:
*
* NRD_SETUP = max(tAR, tCLR)
*/
timeps = max(conf->timings.sdr.tAR_min, conf->timings.sdr.tCLR_min);
ncycles = DIV_ROUND_UP(timeps, mckperiodps);
totalcycles += ncycles;
ret = atmel_smc_cs_conf_set_setup(smcconf, ATMEL_SMC_NRD_SHIFT, ncycles);
if (ret)
return ret;
/*
* The read cycle timing is directly matching tRC, but is also
* dependent on the setup and hold timings we calculated earlier,
* which gives:
*
* NRD_CYCLE = max(tRC, NRD_PULSE + NRD_HOLD)
*
* NRD_SETUP is always 0.
* NRD_CYCLE = max(tRC, NRD_SETUP + NRD_PULSE + NRD_HOLD)
*/
ncycles = DIV_ROUND_UP(conf->timings.sdr.tRC_min, mckperiodps);
ncycles = max(totalcycles, ncycles);


@ -261,6 +261,7 @@ struct stm32_fmc2_nfc {
struct sg_table dma_data_sg;
struct sg_table dma_ecc_sg;
u8 *ecc_buf;
dma_addr_t dma_ecc_addr;
int dma_ecc_len;
struct completion complete;
@ -860,8 +861,8 @@ static int stm32_fmc2_nfc_xfer(struct nand_chip *chip, const u8 *buf,
ret = dma_map_sg(nfc->dev, nfc->dma_data_sg.sgl,
eccsteps, dma_data_dir);
if (ret < 0)
return ret;
if (!ret)
return -EIO;
desc_data = dmaengine_prep_slave_sg(dma_ch, nfc->dma_data_sg.sgl,
eccsteps, dma_transfer_dir,
@ -883,24 +884,19 @@ static int stm32_fmc2_nfc_xfer(struct nand_chip *chip, const u8 *buf,
if (!write_data && !raw) {
/* Configure DMA ECC status */
p = nfc->ecc_buf;
for_each_sg(nfc->dma_ecc_sg.sgl, sg, eccsteps, s) {
sg_set_buf(sg, p, nfc->dma_ecc_len);
p += nfc->dma_ecc_len;
sg_dma_address(sg) = nfc->dma_ecc_addr +
s * nfc->dma_ecc_len;
sg_dma_len(sg) = nfc->dma_ecc_len;
}
ret = dma_map_sg(nfc->dev, nfc->dma_ecc_sg.sgl,
eccsteps, dma_data_dir);
if (ret < 0)
goto err_unmap_data;
desc_ecc = dmaengine_prep_slave_sg(nfc->dma_ecc_ch,
nfc->dma_ecc_sg.sgl,
eccsteps, dma_transfer_dir,
DMA_PREP_INTERRUPT);
if (!desc_ecc) {
ret = -ENOMEM;
goto err_unmap_ecc;
goto err_unmap_data;
}
reinit_completion(&nfc->dma_ecc_complete);
@ -908,7 +904,7 @@ static int stm32_fmc2_nfc_xfer(struct nand_chip *chip, const u8 *buf,
desc_ecc->callback_param = &nfc->dma_ecc_complete;
ret = dma_submit_error(dmaengine_submit(desc_ecc));
if (ret)
goto err_unmap_ecc;
goto err_unmap_data;
dma_async_issue_pending(nfc->dma_ecc_ch);
}
@ -928,7 +924,7 @@ static int stm32_fmc2_nfc_xfer(struct nand_chip *chip, const u8 *buf,
if (!write_data && !raw)
dmaengine_terminate_all(nfc->dma_ecc_ch);
ret = -ETIMEDOUT;
goto err_unmap_ecc;
goto err_unmap_data;
}
/* Wait DMA data transfer completion */
@ -948,11 +944,6 @@ static int stm32_fmc2_nfc_xfer(struct nand_chip *chip, const u8 *buf,
}
}
err_unmap_ecc:
if (!write_data && !raw)
dma_unmap_sg(nfc->dev, nfc->dma_ecc_sg.sgl,
eccsteps, dma_data_dir);
err_unmap_data:
dma_unmap_sg(nfc->dev, nfc->dma_data_sg.sgl, eccsteps, dma_data_dir);
@ -975,9 +966,21 @@ static int stm32_fmc2_nfc_seq_write(struct nand_chip *chip, const u8 *buf,
/* Write oob */
if (oob_required) {
ret = nand_change_write_column_op(chip, mtd->writesize,
chip->oob_poi, mtd->oobsize,
false);
unsigned int offset_in_page = mtd->writesize;
const void *buf = chip->oob_poi;
unsigned int len = mtd->oobsize;
if (!raw) {
struct mtd_oob_region oob_free;
mtd_ooblayout_free(mtd, 0, &oob_free);
offset_in_page += oob_free.offset;
buf += oob_free.offset;
len = oob_free.length;
}
ret = nand_change_write_column_op(chip, offset_in_page,
buf, len, false);
if (ret)
return ret;
}
@ -1578,7 +1581,8 @@ static int stm32_fmc2_nfc_dma_setup(struct stm32_fmc2_nfc *nfc)
return ret;
/* Allocate a buffer to store ECC status registers */
nfc->ecc_buf = devm_kzalloc(nfc->dev, FMC2_MAX_ECC_BUF_LEN, GFP_KERNEL);
nfc->ecc_buf = dmam_alloc_coherent(nfc->dev, FMC2_MAX_ECC_BUF_LEN,
&nfc->dma_ecc_addr, GFP_KERNEL);
if (!nfc->ecc_buf)
return -ENOMEM;


@ -4,6 +4,7 @@
* Copyright (C) 2008-2009 Wolfgang Grandegger <wg@grandegger.com>
*/
#include <linux/units.h>
#include <linux/can/dev.h>
#ifdef CONFIG_CAN_CALC_BITTIMING
@ -81,9 +82,9 @@ int can_calc_bittiming(struct net_device *dev, struct can_bittiming *bt,
if (bt->sample_point) {
sample_point_nominal = bt->sample_point;
} else {
if (bt->bitrate > 800 * CAN_KBPS)
if (bt->bitrate > 800 * KILO /* BPS */)
sample_point_nominal = 750;
else if (bt->bitrate > 500 * CAN_KBPS)
else if (bt->bitrate > 500 * KILO /* BPS */)
sample_point_nominal = 800;
else
sample_point_nominal = 875;
@ -182,9 +183,12 @@ void can_calc_tdco(struct net_device *dev)
struct can_tdc *tdc = &priv->tdc;
const struct can_tdc_const *tdc_const = priv->tdc_const;
if (!tdc_const)
if (!tdc_const ||
!(priv->ctrlmode_supported & CAN_CTRLMODE_TDC_AUTO))
return;
priv->ctrlmode &= ~CAN_CTRLMODE_TDC_MASK;
/* As specified in ISO 11898-1 section 11.3.3 "Transmitter
* delay compensation" (TDC) is only applicable if data BRP is
* one or two.
@ -193,9 +197,10 @@ void can_calc_tdco(struct net_device *dev)
/* Reuse "normal" sample point and convert it to time quanta */
u32 sample_point_in_tq = can_bit_time(dbt) * dbt->sample_point / 1000;
if (sample_point_in_tq < tdc_const->tdco_min)
return;
tdc->tdco = min(sample_point_in_tq, tdc_const->tdco_max);
} else {
tdc->tdco = 0;
priv->ctrlmode |= CAN_CTRLMODE_TDC_AUTO;
}
}
#endif /* CONFIG_CAN_CALC_BITTIMING */


@ -331,6 +331,56 @@ int can_change_mtu(struct net_device *dev, int new_mtu)
}
EXPORT_SYMBOL_GPL(can_change_mtu);
/* generic implementation of netdev_ops::ndo_eth_ioctl for CAN devices
* supporting hardware timestamps
*/
int can_eth_ioctl_hwts(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
struct hwtstamp_config hwts_cfg = { 0 };
switch (cmd) {
case SIOCSHWTSTAMP: /* set */
if (copy_from_user(&hwts_cfg, ifr->ifr_data, sizeof(hwts_cfg)))
return -EFAULT;
if (hwts_cfg.tx_type == HWTSTAMP_TX_ON &&
hwts_cfg.rx_filter == HWTSTAMP_FILTER_ALL)
return 0;
return -ERANGE;
case SIOCGHWTSTAMP: /* get */
hwts_cfg.tx_type = HWTSTAMP_TX_ON;
hwts_cfg.rx_filter = HWTSTAMP_FILTER_ALL;
if (copy_to_user(ifr->ifr_data, &hwts_cfg, sizeof(hwts_cfg)))
return -EFAULT;
return 0;
default:
return -EOPNOTSUPP;
}
}
EXPORT_SYMBOL(can_eth_ioctl_hwts);
/* generic implementation of ethtool_ops::get_ts_info for CAN devices
* supporting hardware timestamps
*/
int can_ethtool_op_get_ts_info_hwts(struct net_device *dev,
struct ethtool_ts_info *info)
{
info->so_timestamping =
SOF_TIMESTAMPING_TX_SOFTWARE |
SOF_TIMESTAMPING_RX_SOFTWARE |
SOF_TIMESTAMPING_SOFTWARE |
SOF_TIMESTAMPING_TX_HARDWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
SOF_TIMESTAMPING_RAW_HARDWARE;
info->phc_index = -1;
info->tx_types = BIT(HWTSTAMP_TX_ON);
info->rx_filters = BIT(HWTSTAMP_FILTER_ALL);
return 0;
}
EXPORT_SYMBOL(can_ethtool_op_get_ts_info_hwts);
/* Common open function when the device gets opened.
*
* This function should be called in the open function of the device


@ -863,7 +863,6 @@ static int __maybe_unused rcar_can_resume(struct device *dev)
{
struct net_device *ndev = dev_get_drvdata(dev);
struct rcar_can_priv *priv = netdev_priv(ndev);
u16 ctlr;
int err;
if (!netif_running(ndev))
@ -875,12 +874,7 @@ static int __maybe_unused rcar_can_resume(struct device *dev)
return err;
}
ctlr = readw(&priv->regs->ctlr);
ctlr &= ~RCAR_CAN_CTLR_SLPM;
writew(ctlr, &priv->regs->ctlr);
ctlr &= ~RCAR_CAN_CTLR_CANM;
writew(ctlr, &priv->regs->ctlr);
priv->can.state = CAN_STATE_ERROR_ACTIVE;
rcar_can_start(ndev);
netif_device_attach(ndev);
netif_start_queue(ndev);


@ -807,6 +807,7 @@ static const struct net_device_ops hi3110_netdev_ops = {
.ndo_open = hi3110_open,
.ndo_stop = hi3110_stop,
.ndo_start_xmit = hi3110_hard_start_xmit,
.ndo_change_mtu = can_change_mtu,
};
static const struct of_device_id hi3110_of_match[] = {


@ -748,6 +748,7 @@ static const struct net_device_ops sun4ican_netdev_ops = {
.ndo_open = sun4ican_open,
.ndo_stop = sun4ican_close,
.ndo_start_xmit = sun4ican_start_xmit,
.ndo_change_mtu = can_change_mtu,
};
static const struct of_device_id sun4ican_of_match[] = {


@ -6,11 +6,12 @@
*
* Copyright (c) 2019 Robert Bosch Engineering and Business Solutions. All rights reserved.
* Copyright (c) 2020 ETAS K.K.. All rights reserved.
* Copyright (c) 2020, 2021 Vincent Mailhol <mailhol.vincent@wanadoo.fr>
* Copyright (c) 2020-2022 Vincent Mailhol <mailhol.vincent@wanadoo.fr>
*/
#include <linux/kernel.h>
#include <asm/unaligned.h>
#include <linux/kernel.h>
#include <linux/units.h>
#include "es58x_core.h"
#include "es581_4.h"
@ -469,8 +470,8 @@ const struct es58x_parameters es581_4_param = {
.bittiming_const = &es581_4_bittiming_const,
.data_bittiming_const = NULL,
.tdc_const = NULL,
.bitrate_max = 1 * CAN_MBPS,
.clock = {.freq = 50 * CAN_MHZ},
.bitrate_max = 1 * MEGA /* BPS */,
.clock = {.freq = 50 * MEGA /* Hz */},
.ctrlmode_supported = CAN_CTRLMODE_CC_LEN8_DLC,
.tx_start_of_frame = 0xAFAF,
.rx_start_of_frame = 0xFAFA,


@ -7,14 +7,15 @@
*
* Copyright (c) 2019 Robert Bosch Engineering and Business Solutions. All rights reserved.
* Copyright (c) 2020 ETAS K.K.. All rights reserved.
* Copyright (c) 2020, 2021 Vincent Mailhol <mailhol.vincent@wanadoo.fr>
* Copyright (c) 2020-2025 Vincent Mailhol <mailhol@kernel.org>
*/
#include <asm/unaligned.h>
#include <linux/crc16.h>
#include <linux/ethtool.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/usb.h>
#include <linux/crc16.h>
#include <asm/unaligned.h>
#include "es58x_core.h"
@ -1981,7 +1982,13 @@ static netdev_tx_t es58x_start_xmit(struct sk_buff *skb,
static const struct net_device_ops es58x_netdev_ops = {
.ndo_open = es58x_open,
.ndo_stop = es58x_stop,
.ndo_start_xmit = es58x_start_xmit
.ndo_start_xmit = es58x_start_xmit,
.ndo_eth_ioctl = can_eth_ioctl_hwts,
.ndo_change_mtu = can_change_mtu,
};
static const struct ethtool_ops es58x_ethtool_ops = {
.get_ts_info = can_ethtool_op_get_ts_info_hwts,
};
/**
@ -2088,6 +2095,7 @@ static int es58x_init_netdev(struct es58x_device *es58x_dev, int channel_idx)
es58x_init_priv(es58x_dev, es58x_priv(netdev), channel_idx);
netdev->netdev_ops = &es58x_netdev_ops;
netdev->ethtool_ops = &es58x_ethtool_ops;
netdev->flags |= IFF_ECHO; /* We support local echo */
ret = register_candev(netdev);


@ -6,17 +6,17 @@
*
* Copyright (c) 2019 Robert Bosch Engineering and Business Solutions. All rights reserved.
* Copyright (c) 2020 ETAS K.K.. All rights reserved.
* Copyright (c) 2020, 2021 Vincent Mailhol <mailhol.vincent@wanadoo.fr>
* Copyright (c) 2020-2022 Vincent Mailhol <mailhol.vincent@wanadoo.fr>
*/
#ifndef __ES58X_COMMON_H__
#define __ES58X_COMMON_H__
#include <linux/types.h>
#include <linux/usb.h>
#include <linux/netdevice.h>
#include <linux/can.h>
#include <linux/can/dev.h>
#include <linux/netdevice.h>
#include <linux/types.h>
#include <linux/usb.h>
#include "es581_4.h"
#include "es58x_fd.h"


@ -8,11 +8,12 @@
*
* Copyright (c) 2019 Robert Bosch Engineering and Business Solutions. All rights reserved.
* Copyright (c) 2020 ETAS K.K.. All rights reserved.
* Copyright (c) 2020, 2021 Vincent Mailhol <mailhol.vincent@wanadoo.fr>
* Copyright (c) 2020-2022 Vincent Mailhol <mailhol.vincent@wanadoo.fr>
*/
#include <linux/kernel.h>
#include <asm/unaligned.h>
#include <linux/kernel.h>
#include <linux/units.h>
#include "es58x_core.h"
#include "es58x_fd.h"
@ -427,7 +428,7 @@ static int es58x_fd_enable_channel(struct es58x_priv *priv)
es58x_fd_convert_bittiming(&tx_conf_msg.data_bittiming,
&priv->can.data_bittiming);
if (priv->can.tdc.tdco) {
if (can_tdc_is_enabled(&priv->can)) {
tx_conf_msg.tdc_enabled = 1;
tx_conf_msg.tdco = cpu_to_le16(priv->can.tdc.tdco);
tx_conf_msg.tdcf = cpu_to_le16(priv->can.tdc.tdcf);
@ -504,8 +505,11 @@ static const struct can_bittiming_const es58x_fd_data_bittiming_const = {
* Register" from Microchip.
*/
static const struct can_tdc_const es58x_tdc_const = {
.tdcv_min = 0,
.tdcv_max = 0, /* Manual mode not supported. */
.tdco_min = 0,
.tdco_max = 127,
.tdcf_min = 0,
.tdcf_max = 127
};
@ -518,11 +522,11 @@ const struct es58x_parameters es58x_fd_param = {
* Mbps work in an optimal environment but are not recommended
* for production environment.
*/
.bitrate_max = 8 * CAN_MBPS,
.clock = {.freq = 80 * CAN_MHZ},
.bitrate_max = 8 * MEGA /* BPS */,
.clock = {.freq = 80 * MEGA /* Hz */},
.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK | CAN_CTRLMODE_LISTENONLY |
CAN_CTRLMODE_3_SAMPLES | CAN_CTRLMODE_FD | CAN_CTRLMODE_FD_NON_ISO |
CAN_CTRLMODE_CC_LEN8_DLC,
CAN_CTRLMODE_CC_LEN8_DLC | CAN_CTRLMODE_TDC_AUTO,
.tx_start_of_frame = 0xCEFA, /* FACE in little endian */
.rx_start_of_frame = 0xFECA, /* CAFE in little endian */
.tx_urb_cmd_max_len = ES58X_FD_TX_URB_CMD_MAX_LEN,


@ -769,6 +769,7 @@ static const struct net_device_ops mcba_netdev_ops = {
.ndo_open = mcba_usb_open,
.ndo_stop = mcba_usb_close,
.ndo_start_xmit = mcba_usb_start_xmit,
.ndo_change_mtu = can_change_mtu,
};
/* Microchip CANBUS has hardcoded bittiming values by default.


@ -89,7 +89,7 @@ void peak_usb_update_ts_now(struct peak_time_ref *time_ref, u32 ts_now)
u32 delta_ts = time_ref->ts_dev_2 - time_ref->ts_dev_1;
if (time_ref->ts_dev_2 < time_ref->ts_dev_1)
delta_ts &= (1 << time_ref->adapter->ts_used_bits) - 1;
delta_ts &= (1ULL << time_ref->adapter->ts_used_bits) - 1;
time_ref->ts_total += delta_ts;
}


@ -590,14 +590,6 @@ static void xcan_write_frame(struct net_device *ndev, struct sk_buff *skb,
dlc |= XCAN_DLCR_EDL_MASK;
}
if (!(priv->devtype.flags & XCAN_FLAG_TX_MAILBOXES) &&
(priv->devtype.flags & XCAN_FLAG_TXFEMP))
can_put_echo_skb(skb, ndev, priv->tx_head % priv->tx_max, 0);
else
can_put_echo_skb(skb, ndev, 0, 0);
priv->tx_head++;
priv->write_reg(priv, XCAN_FRAME_ID_OFFSET(frame_offset), id);
/* If the CAN frame is RTR frame this write triggers transmission
* (not on CAN FD)
@ -630,6 +622,14 @@ static void xcan_write_frame(struct net_device *ndev, struct sk_buff *skb,
data[1]);
}
}
if (!(priv->devtype.flags & XCAN_FLAG_TX_MAILBOXES) &&
(priv->devtype.flags & XCAN_FLAG_TXFEMP))
can_put_echo_skb(skb, ndev, priv->tx_head % priv->tx_max, 0);
else
can_put_echo_skb(skb, ndev, 0, 0);
priv->tx_head++;
}
/**


@ -655,21 +655,35 @@ static int gswip_add_single_port_br(struct gswip_priv *priv, int port, bool add)
return 0;
}
static int gswip_port_enable(struct dsa_switch *ds, int port,
struct phy_device *phydev)
static int gswip_port_setup(struct dsa_switch *ds, int port)
{
struct gswip_priv *priv = ds->priv;
int err;
if (!dsa_is_user_port(ds, port))
return 0;
if (!dsa_is_cpu_port(ds, port)) {
err = gswip_add_single_port_br(priv, port, true);
if (err)
return err;
}
return 0;
}
static int gswip_port_enable(struct dsa_switch *ds, int port,
struct phy_device *phydev)
{
struct gswip_priv *priv = ds->priv;
if (!dsa_is_cpu_port(ds, port)) {
u32 mdio_phy = 0;
if (phydev)
mdio_phy = phydev->mdio.addr & GSWIP_MDIO_PHY_ADDR_MASK;
gswip_mdio_mask(priv, GSWIP_MDIO_PHY_ADDR_MASK, mdio_phy,
GSWIP_MDIO_PHYp(port));
}
/* RMON Counter Enable for port */
gswip_switch_w(priv, GSWIP_BM_PCFG_CNTEN, GSWIP_BM_PCFGp(port));
@ -680,16 +694,6 @@ static int gswip_port_enable(struct dsa_switch *ds, int port,
gswip_switch_mask(priv, 0, GSWIP_SDMA_PCTRL_EN,
GSWIP_SDMA_PCTRLp(port));
if (!dsa_is_cpu_port(ds, port)) {
u32 mdio_phy = 0;
if (phydev)
mdio_phy = phydev->mdio.addr & GSWIP_MDIO_PHY_ADDR_MASK;
gswip_mdio_mask(priv, GSWIP_MDIO_PHY_ADDR_MASK, mdio_phy,
GSWIP_MDIO_PHYp(port));
}
return 0;
}
@ -697,9 +701,6 @@ static void gswip_port_disable(struct dsa_switch *ds, int port)
{
struct gswip_priv *priv = ds->priv;
if (!dsa_is_user_port(ds, port))
return;
gswip_switch_mask(priv, GSWIP_FDMA_PCTRL_EN, 0,
GSWIP_FDMA_PCTRLp(port));
gswip_switch_mask(priv, GSWIP_SDMA_PCTRL_EN, 0,
@ -1336,8 +1337,9 @@ static int gswip_port_fdb(struct dsa_switch *ds, int port,
int i;
int err;
/* Operation not supported on the CPU port, don't throw errors */
if (!bridge)
return -EINVAL;
return 0;
for (i = cpu_port; i < ARRAY_SIZE(priv->vlans); i++) {
if (priv->vlans[i].bridge == bridge) {
@ -1802,6 +1804,7 @@ static int gswip_get_sset_count(struct dsa_switch *ds, int port, int sset)
static const struct dsa_switch_ops gswip_xrx200_switch_ops = {
.get_tag_protocol = gswip_get_tag_protocol,
.setup = gswip_setup,
.port_setup = gswip_port_setup,
.port_enable = gswip_port_enable,
.port_disable = gswip_port_disable,
.port_bridge_join = gswip_port_bridge_join,


@ -244,7 +244,7 @@ bnxt_tc_parse_pedit(struct bnxt *bp, struct bnxt_tc_actions *actions,
offset < offset_of_ip6_daddr + 16) {
actions->nat.src_xlate = false;
idx = (offset - offset_of_ip6_daddr) / 4;
actions->nat.l3.ipv6.saddr.s6_addr32[idx] = htonl(val);
actions->nat.l3.ipv6.daddr.s6_addr32[idx] = htonl(val);
} else {
netdev_err(bp->dev,
"%s: IPv6_hdr: Invalid pedit field\n",


@ -4223,8 +4223,7 @@ static void cnic_cm_stop_bnx2x_hw(struct cnic_dev *dev)
cnic_bnx2x_delete_wait(dev, 0);
cancel_delayed_work(&cp->delete_task);
flush_workqueue(cnic_wq);
cancel_delayed_work_sync(&cp->delete_task);
if (atomic_read(&cp->iscsi_conn) != 0)
netdev_warn(dev->netdev, "%d iSCSI connections not destroyed\n",


@ -135,7 +135,7 @@ int octeon_init_instr_queue(struct octeon_device *oct,
oct->io_qmask.iq |= BIT_ULL(iq_no);
/* Set the 32B/64B mode for each input queue */
oct->io_qmask.iq64B |= ((conf->instr_type == 64) << iq_no);
oct->io_qmask.iq64B |= ((u64)(conf->instr_type == 64) << iq_no);
iq->iqcmd_64B = (conf->instr_type == 64);
oct->fn_list.setup_iq_regs(oct, iq_no);


@ -2682,7 +2682,7 @@ static int dpaa2_switch_setup_dpbp(struct ethsw_core *ethsw)
dev_err(dev, "dpsw_ctrl_if_set_pools() failed\n");
goto err_get_attr;
}
ethsw->bpid = dpbp_attrs.id;
ethsw->bpid = dpbp_attrs.bpid;
return 0;


@ -2033,7 +2033,8 @@ static void fec_enet_phy_reset_after_clk_enable(struct net_device *ndev)
*/
phy_dev = of_phy_find_device(fep->phy_node);
phy_reset_after_clk_enable(phy_dev);
put_device(&phy_dev->mdio.dev);
if (phy_dev)
put_device(&phy_dev->mdio.dev);
}
}


@ -50,6 +50,7 @@
#define I40E_MAX_VEB 16
#define I40E_MAX_NUM_DESCRIPTORS 4096
#define I40E_MAX_NUM_DESCRIPTORS_XL710 8160
#define I40E_MAX_CSR_SPACE (4 * 1024 * 1024 - 64 * 1024)
#define I40E_DEFAULT_NUM_DESCRIPTORS 512
#define I40E_REQ_DESCRIPTOR_MULTIPLE 32


@ -1918,6 +1918,18 @@ static void i40e_get_drvinfo(struct net_device *netdev,
drvinfo->n_priv_flags += I40E_GL_PRIV_FLAGS_STR_LEN;
}
static u32 i40e_get_max_num_descriptors(struct i40e_pf *pf)
{
struct i40e_hw *hw = &pf->hw;
switch (hw->mac.type) {
case I40E_MAC_XL710:
return I40E_MAX_NUM_DESCRIPTORS_XL710;
default:
return I40E_MAX_NUM_DESCRIPTORS;
}
}
static void i40e_get_ringparam(struct net_device *netdev,
struct ethtool_ringparam *ring)
{
@ -1925,8 +1937,8 @@ static void i40e_get_ringparam(struct net_device *netdev,
struct i40e_pf *pf = np->vsi->back;
struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
ring->rx_max_pending = I40E_MAX_NUM_DESCRIPTORS;
ring->tx_max_pending = I40E_MAX_NUM_DESCRIPTORS;
ring->rx_max_pending = i40e_get_max_num_descriptors(pf);
ring->tx_max_pending = i40e_get_max_num_descriptors(pf);
ring->rx_mini_max_pending = 0;
ring->rx_jumbo_max_pending = 0;
ring->rx_pending = vsi->rx_rings[0]->count;
@ -1949,12 +1961,12 @@ static bool i40e_active_tx_ring_index(struct i40e_vsi *vsi, u16 index)
static int i40e_set_ringparam(struct net_device *netdev,
struct ethtool_ringparam *ring)
{
u32 new_rx_count, new_tx_count, max_num_descriptors;
struct i40e_ring *tx_rings = NULL, *rx_rings = NULL;
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_hw *hw = &np->vsi->back->hw;
struct i40e_vsi *vsi = np->vsi;
struct i40e_pf *pf = vsi->back;
u32 new_rx_count, new_tx_count;
u16 tx_alloc_queue_pairs;
int timeout = 50;
int i, err = 0;
@ -1962,14 +1974,15 @@ static int i40e_set_ringparam(struct net_device *netdev,
if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
return -EINVAL;
if (ring->tx_pending > I40E_MAX_NUM_DESCRIPTORS ||
max_num_descriptors = i40e_get_max_num_descriptors(pf);
if (ring->tx_pending > max_num_descriptors ||
ring->tx_pending < I40E_MIN_NUM_DESCRIPTORS ||
ring->rx_pending > I40E_MAX_NUM_DESCRIPTORS ||
ring->rx_pending > max_num_descriptors ||
ring->rx_pending < I40E_MIN_NUM_DESCRIPTORS) {
netdev_info(netdev,
"Descriptors requested (Tx: %d / Rx: %d) out of range [%d-%d]\n",
ring->tx_pending, ring->rx_pending,
I40E_MIN_NUM_DESCRIPTORS, I40E_MAX_NUM_DESCRIPTORS);
I40E_MIN_NUM_DESCRIPTORS, max_num_descriptors);
return -EINVAL;
}


@ -4152,10 +4152,10 @@ static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename)
*
* get_cpu_mask returns a static constant mask with
* a permanent lifetime so it's ok to pass to
* irq_set_affinity_hint without making a copy.
* irq_update_affinity_hint without making a copy.
*/
cpu = cpumask_local_spread(q_vector->v_idx, -1);
irq_set_affinity_hint(irq_num, get_cpu_mask(cpu));
irq_update_affinity_hint(irq_num, get_cpu_mask(cpu));
}
vsi->irqs_ready = true;
@ -4166,8 +4166,8 @@ free_queue_irqs:
vector--;
irq_num = pf->msix_entries[base + vector].vector;
irq_set_affinity_notifier(irq_num, NULL);
irq_set_affinity_hint(irq_num, NULL);
free_irq(irq_num, &vsi->q_vectors[vector]);
irq_update_affinity_hint(irq_num, NULL);
free_irq(irq_num, vsi->q_vectors[vector]);
}
return err;
}
@ -4987,7 +4987,7 @@ static void i40e_vsi_free_irq(struct i40e_vsi *vsi)
/* clear the affinity notifier in the IRQ descriptor */
irq_set_affinity_notifier(irq_num, NULL);
/* remove our suggested affinity mask for this IRQ */
irq_set_affinity_hint(irq_num, NULL);
irq_update_affinity_hint(irq_num, NULL);
synchronize_irq(irq_num);
free_irq(irq_num, vsi->q_vectors[i]);


@ -949,9 +949,6 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
if (!eop_desc)
break;
/* prevent any other reads prior to eop_desc */
smp_rmb();
i40e_trace(clean_tx_irq, tx_ring, tx_desc, tx_buf);
/* we have caught up to head, no work left to do */
if (tx_head == tx_desc)


@ -446,7 +446,7 @@ static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_id,
(qtype << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
(pf_queue_id << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
BIT(I40E_QINT_RQCTL_CAUSE_ENA_SHIFT) |
(itr_idx << I40E_QINT_RQCTL_ITR_INDX_SHIFT);
FIELD_PREP(I40E_QINT_RQCTL_ITR_INDX_MASK, itr_idx);
wr32(hw, reg_idx, reg);
}
@ -653,6 +653,13 @@ static int i40e_config_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_id,
/* only set the required fields */
tx_ctx.base = info->dma_ring_addr / 128;
/* ring_len has to be multiple of 8 */
if (!IS_ALIGNED(info->ring_len, 8) ||
info->ring_len > I40E_MAX_NUM_DESCRIPTORS_XL710) {
ret = -EINVAL;
goto error_context;
}
tx_ctx.qlen = info->ring_len;
tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[0]);
tx_ctx.rdylist_act = 0;
@ -718,6 +725,13 @@ static int i40e_config_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_id,
/* only set the required fields */
rx_ctx.base = info->dma_ring_addr / 128;
/* ring_len has to be multiple of 32 */
if (!IS_ALIGNED(info->ring_len, 32) ||
info->ring_len > I40E_MAX_NUM_DESCRIPTORS_XL710) {
ret = -EINVAL;
goto error_param;
}
rx_ctx.qlen = info->ring_len;
if (info->splithdr_enabled) {
@ -1455,6 +1469,7 @@ static void i40e_trigger_vf_reset(struct i40e_vf *vf, bool flr)
* functions that may still be running at this point.
*/
clear_bit(I40E_VF_STATE_INIT, &vf->vf_states);
clear_bit(I40E_VF_STATE_RESOURCES_LOADED, &vf->vf_states);
/* In the case of a VFLR, the HW has already reset the VF and we
* just need to clean up, so don't hit the VFRTRIG register.
@ -2121,7 +2136,10 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
size_t len = 0;
int ret;
if (!i40e_sync_vf_state(vf, I40E_VF_STATE_INIT)) {
i40e_sync_vf_state(vf, I40E_VF_STATE_INIT);
if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states) ||
test_bit(I40E_VF_STATE_RESOURCES_LOADED, &vf->vf_states)) {
aq_ret = I40E_ERR_PARAM;
goto err;
}
@ -2224,6 +2242,7 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
vf->default_lan_addr.addr);
}
set_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states);
set_bit(I40E_VF_STATE_RESOURCES_LOADED, &vf->vf_states);
err:
/* send the response back to the VF */
@ -2386,7 +2405,7 @@ static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg)
}
if (vf->adq_enabled) {
if (idx >= ARRAY_SIZE(vf->ch)) {
if (idx >= vf->num_tc) {
aq_ret = I40E_ERR_NO_AVAILABLE_VSI;
goto error_param;
}
@ -2407,7 +2426,7 @@ static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg)
* to its appropriate VSIs based on TC mapping
*/
if (vf->adq_enabled) {
if (idx >= ARRAY_SIZE(vf->ch)) {
if (idx >= vf->num_tc) {
aq_ret = I40E_ERR_NO_AVAILABLE_VSI;
goto error_param;
}
@ -2457,8 +2476,10 @@ static int i40e_validate_queue_map(struct i40e_vf *vf, u16 vsi_id,
u16 vsi_queue_id, queue_id;
for_each_set_bit(vsi_queue_id, &queuemap, I40E_MAX_VSI_QP) {
if (vf->adq_enabled) {
vsi_id = vf->ch[vsi_queue_id / I40E_MAX_VF_VSI].vsi_id;
u16 idx = vsi_queue_id / I40E_MAX_VF_VSI;
if (vf->adq_enabled && idx < vf->num_tc) {
vsi_id = vf->ch[idx].vsi_id;
queue_id = (vsi_queue_id % I40E_DEFAULT_QUEUES_PER_VF);
} else {
queue_id = vsi_queue_id;
@ -3470,7 +3491,7 @@ static int i40e_validate_cloud_filter(struct i40e_vf *vf,
/* action_meta is TC number here to which the filter is applied */
if (!tc_filter->action_meta ||
tc_filter->action_meta > vf->num_tc) {
tc_filter->action_meta >= vf->num_tc) {
dev_info(&pf->pdev->dev, "VF %d: Invalid TC number %u\n",
vf->vf_id, tc_filter->action_meta);
goto err;
@ -3768,6 +3789,8 @@ err:
aq_ret);
}
#define I40E_MAX_VF_CLOUD_FILTER 0xFF00
/**
* i40e_vc_add_cloud_filter
* @vf: pointer to the VF info
@ -3807,6 +3830,14 @@ static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg)
goto err_out;
}
if (vf->num_cloud_filters >= I40E_MAX_VF_CLOUD_FILTER) {
dev_warn(&pf->pdev->dev,
"VF %d: Max number of filters reached, can't apply cloud filter\n",
vf->vf_id);
aq_ret = -ENOSPC;
goto err_out;
}
cfilter = kzalloc(sizeof(*cfilter), GFP_KERNEL);
if (!cfilter)
return -ENOMEM;


@ -39,7 +39,8 @@ enum i40e_vf_states {
I40E_VF_STATE_MC_PROMISC,
I40E_VF_STATE_UC_PROMISC,
I40E_VF_STATE_PRE_ENABLE,
I40E_VF_STATE_RESETTING
I40E_VF_STATE_RESETTING,
I40E_VF_STATE_RESOURCES_LOADED,
};
/* VF capabilities */


@ -2077,11 +2077,8 @@ static void igb_diag_test(struct net_device *netdev,
} else {
dev_info(&adapter->pdev->dev, "online testing starting\n");
/* PHY is powered down when interface is down */
if (if_running && igb_link_test(adapter, &data[TEST_LINK]))
if (igb_link_test(adapter, &data[TEST_LINK]))
eth_test->flags |= ETH_TEST_FL_FAILED;
else
data[TEST_LINK] = 0;
/* Online tests aren't run; pass by default */
data[TEST_REG] = 0;


@ -21,8 +21,7 @@
#include "rvu.h"
#include "lmac_common.h"
#define DRV_NAME "Marvell-CGX/RPM"
#define DRV_STRING "Marvell CGX/RPM Driver"
#define DRV_NAME "Marvell-CGX-RPM"
#define CGX_RX_STAT_GLOBAL_INDEX 9


@ -104,8 +104,6 @@ void mlx5e_update_carrier(struct mlx5e_priv *priv)
if (up) {
netdev_info(priv->netdev, "Link up\n");
netif_carrier_on(priv->netdev);
mlx5e_port_manual_buffer_config(priv, 0, priv->netdev->mtu,
NULL, NULL, NULL);
} else {
netdev_info(priv->netdev, "Link down\n");
netif_carrier_off(priv->netdev);


@ -820,7 +820,7 @@ static void rx_irq(struct net_device *ndev)
struct ns83820 *dev = PRIV(ndev);
struct rx_info *info = &dev->rx_info;
unsigned next_rx;
int rx_rc, len;
int len;
u32 cmdsts;
__le32 *desc;
unsigned long flags;
@ -881,8 +881,10 @@ static void rx_irq(struct net_device *ndev)
if (likely(CMDSTS_OK & cmdsts)) {
#endif
skb_put(skb, len);
if (unlikely(!skb))
if (unlikely(!skb)) {
ndev->stats.rx_dropped++;
goto netdev_mangle_me_harder_failed;
}
if (cmdsts & CMDSTS_DEST_MULTI)
ndev->stats.multicast++;
ndev->stats.rx_packets++;
@ -901,15 +903,12 @@ static void rx_irq(struct net_device *ndev)
__vlan_hwaccel_put_tag(skb, htons(ETH_P_IPV6), tag);
}
#endif
rx_rc = netif_rx(skb);
if (NET_RX_DROP == rx_rc) {
netdev_mangle_me_harder_failed:
ndev->stats.rx_dropped++;
}
netif_rx(skb);
} else {
dev_kfree_skb_irq(skb);
}
netdev_mangle_me_harder_failed:
nr++;
next_rx = info->next_rx;
desc = info->descs + (DESC_SIZE * next_rx);


@ -4374,10 +4374,11 @@ static enum dbg_status qed_protection_override_dump(struct qed_hwfn *p_hwfn,
goto out;
}
/* Add override window info to buffer */
/* Add override window info to buffer, preventing buffer overflow */
override_window_dwords =
qed_rd(p_hwfn, p_ptt, GRC_REG_NUMBER_VALID_OVERRIDE_WINDOW) *
PROTECTION_OVERRIDE_ELEMENT_DWORDS;
min(qed_rd(p_hwfn, p_ptt, GRC_REG_NUMBER_VALID_OVERRIDE_WINDOW) *
PROTECTION_OVERRIDE_ELEMENT_DWORDS,
PROTECTION_OVERRIDE_DEPTH_DWORDS);
if (override_window_dwords) {
addr = BYTES_TO_DWORDS(GRC_REG_PROTECTION_OVERRIDE_WINDOW);
offset += qed_grc_dump_addr_range(p_hwfn,


@ -331,7 +331,13 @@ static int __exit omap_cf_remove(struct platform_device *pdev)
return 0;
}
static struct platform_driver omap_cf_driver = {
/*
* omap_cf_remove() lives in .exit.text. For drivers registered via
* platform_driver_probe() this is ok because they cannot get unbound at
* runtime. So mark the driver struct with __refdata to prevent modpost
* triggering a section mismatch warning.
*/
static struct platform_driver omap_cf_driver __refdata = {
.driver = {
.name = driver_name,
},


@ -16,10 +16,11 @@
#include <linux/iopoll.h>
#include <linux/mdio.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/phy/phy.h>
#include <linux/property.h>
#include <linux/slab.h>
#define BCM_NS_USB3_PHY_BASE_ADDR_REG 0x1f
@ -189,7 +190,6 @@ static int bcm_ns_usb3_mdio_phy_write(struct bcm_ns_usb3 *usb3, u16 reg,
static int bcm_ns_usb3_mdio_probe(struct mdio_device *mdiodev)
{
struct device *dev = &mdiodev->dev;
const struct of_device_id *of_id;
struct phy_provider *phy_provider;
struct device_node *syscon_np;
struct bcm_ns_usb3 *usb3;
@ -203,10 +203,7 @@ static int bcm_ns_usb3_mdio_probe(struct mdio_device *mdiodev)
usb3->dev = dev;
usb3->mdiodev = mdiodev;
of_id = of_match_device(bcm_ns_usb3_id_table, dev);
if (!of_id)
return -EINVAL;
usb3->family = (enum bcm_ns_family)of_id->data;
usb3->family = (enum bcm_ns_family)device_get_match_data(dev);
syscon_np = of_parse_phandle(dev->of_node, "usb3-dmp-syscon", 0);
err = of_address_to_resource(syscon_np, 0, &res);


@ -8,9 +8,10 @@
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/of.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/reset.h>
#define USB_PHY_PLL 0x04
@ -162,8 +163,6 @@ MODULE_DEVICE_TABLE(of, phy_berlin_usb_of_match);
static int phy_berlin_usb_probe(struct platform_device *pdev)
{
const struct of_device_id *match =
of_match_device(phy_berlin_usb_of_match, &pdev->dev);
struct phy_berlin_usb_priv *priv;
struct phy *phy;
struct phy_provider *phy_provider;
@ -180,7 +179,7 @@ static int phy_berlin_usb_probe(struct platform_device *pdev)
if (IS_ERR(priv->rst_ctrl))
return PTR_ERR(priv->rst_ctrl);
priv->pll_divider = *((u32 *)match->data);
priv->pll_divider = *((u32 *)device_get_match_data(&pdev->dev));
phy = devm_phy_create(&pdev->dev, NULL, &phy_berlin_usb_ops);
if (IS_ERR(phy)) {


@ -13,9 +13,10 @@
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of_platform.h>
#include <linux/of.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/reset.h>
@ -171,18 +172,13 @@ static int ralink_usb_phy_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct phy_provider *phy_provider;
const struct of_device_id *match;
struct ralink_usb_phy *phy;
match = of_match_device(ralink_usb_phy_of_match, &pdev->dev);
if (!match)
return -ENODEV;
phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL);
if (!phy)
return -ENOMEM;
phy->clk = (uintptr_t)match->data;
phy->clk = (uintptr_t)device_get_match_data(&pdev->dev);
phy->base = NULL;
phy->sysctl = syscon_regmap_lookup_by_phandle(dev->of_node, "ralink,sysctl");


@ -12,10 +12,9 @@
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/regmap.h>
#include <linux/reset.h>
@ -63,7 +62,7 @@ struct rockchip_pcie_data {
};
struct rockchip_pcie_phy {
struct rockchip_pcie_data *phy_data;
const struct rockchip_pcie_data *phy_data;
struct regmap *reg_base;
struct phy_pcie_instance {
struct phy *phy;
@ -365,7 +364,6 @@ static int rockchip_pcie_phy_probe(struct platform_device *pdev)
struct rockchip_pcie_phy *rk_phy;
struct phy_provider *phy_provider;
struct regmap *grf;
const struct of_device_id *of_id;
int i;
u32 phy_num;
@ -379,11 +377,10 @@ static int rockchip_pcie_phy_probe(struct platform_device *pdev)
if (!rk_phy)
return -ENOMEM;
of_id = of_match_device(rockchip_pcie_phy_dt_ids, &pdev->dev);
if (!of_id)
rk_phy->phy_data = device_get_match_data(&pdev->dev);
if (!rk_phy->phy_data)
return -EINVAL;
rk_phy->phy_data = (struct rockchip_pcie_data *)of_id->data;
rk_phy->reg_base = grf;
mutex_init(&rk_phy->pcie_mutex);


@ -13,10 +13,9 @@
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/regulator/consumer.h>
#include <linux/reset.h>
#include <linux/regmap.h>
@ -458,7 +457,6 @@ static int rockchip_usb_phy_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct rockchip_usb_phy_base *phy_base;
struct phy_provider *phy_provider;
const struct of_device_id *match;
struct device_node *child;
int err;
@ -466,14 +464,12 @@ static int rockchip_usb_phy_probe(struct platform_device *pdev)
if (!phy_base)
return -ENOMEM;
match = of_match_device(dev->driver->of_match_table, dev);
if (!match || !match->data) {
phy_base->pdata = device_get_match_data(dev);
if (!phy_base->pdata) {
dev_err(dev, "missing phy data\n");
return -EINVAL;
}
phy_base->pdata = match->data;
phy_base->dev = dev;
phy_base->reg_base = ERR_PTR(-ENODEV);
if (dev->parent && dev->parent->of_node)


@ -3165,18 +3165,22 @@ tegra210_xusb_padctl_probe(struct device *dev,
}
pdev = of_find_device_by_node(np);
of_node_put(np);
if (!pdev) {
dev_warn(dev, "PMC device is not available\n");
goto out;
}
if (!platform_get_drvdata(pdev))
if (!platform_get_drvdata(pdev)) {
put_device(&pdev->dev);
return ERR_PTR(-EPROBE_DEFER);
}
padctl->regmap = dev_get_regmap(&pdev->dev, "usb_sleepwalk");
if (!padctl->regmap)
dev_info(dev, "failed to find PMC regmap\n");
put_device(&pdev->dev);
out:
return &padctl->base;
}


@ -8,9 +8,9 @@
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/clk.h>
@ -268,20 +268,15 @@ MODULE_DEVICE_TABLE(of, omap_control_phy_id_table);
static int omap_control_phy_probe(struct platform_device *pdev)
{
const struct of_device_id *of_id;
struct omap_control_phy *control_phy;
of_id = of_match_device(omap_control_phy_id_table, &pdev->dev);
if (!of_id)
return -EINVAL;
control_phy = devm_kzalloc(&pdev->dev, sizeof(*control_phy),
GFP_KERNEL);
if (!control_phy)
return -ENOMEM;
control_phy->dev = &pdev->dev;
control_phy->type = *(enum omap_control_phy_type *)of_id->data;
control_phy->type = *(enum omap_control_phy_type *)device_get_match_data(&pdev->dev);
if (control_phy->type == OMAP_CTRL_TYPE_OTGHS) {
control_phy->otghs_control =


@ -19,6 +19,7 @@
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/property.h>
#include <linux/regmap.h>
#include <linux/slab.h>
#include <linux/sys_soc.h>
@ -362,6 +363,13 @@ static void omap_usb2_init_errata(struct omap_usb *phy)
phy->flags |= OMAP_USB2_DISABLE_CHRG_DET;
}
static void omap_usb2_put_device(void *_dev)
{
struct device *dev = _dev;
put_device(dev);
}
static int omap_usb2_probe(struct platform_device *pdev)
{
struct omap_usb *phy;
@ -371,16 +379,13 @@ static int omap_usb2_probe(struct platform_device *pdev)
struct device_node *node = pdev->dev.of_node;
struct device_node *control_node;
struct platform_device *control_pdev;
const struct of_device_id *of_id;
struct usb_phy_data *phy_data;
const struct usb_phy_data *phy_data;
int ret;
of_id = of_match_device(omap_usb2_id_table, &pdev->dev);
if (!of_id)
phy_data = device_get_match_data(&pdev->dev);
if (!phy_data)
return -EINVAL;
phy_data = (struct usb_phy_data *)of_id->data;
phy = devm_kzalloc(&pdev->dev, sizeof(*phy), GFP_KERNEL);
if (!phy)
return -ENOMEM;
@ -426,6 +431,11 @@ static int omap_usb2_probe(struct platform_device *pdev)
return -EINVAL;
}
phy->control_dev = &control_pdev->dev;
ret = devm_add_action_or_reset(&pdev->dev, omap_usb2_put_device,
phy->control_dev);
if (ret)
return ret;
} else {
if (of_property_read_u32_index(node,
"syscon-phy-power", 1,


@ -8,6 +8,7 @@
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/slab.h>
#include <linux/phy/phy.h>
#include <linux/of.h>
@ -666,12 +667,20 @@ static int ti_pipe3_get_clk(struct ti_pipe3 *phy)
return 0;
}
static void ti_pipe3_put_device(void *_dev)
{
struct device *dev = _dev;
put_device(dev);
}
static int ti_pipe3_get_sysctrl(struct ti_pipe3 *phy)
{
struct device *dev = phy->dev;
struct device_node *node = dev->of_node;
struct device_node *control_node;
struct platform_device *control_pdev;
int ret;
phy->phy_power_syscon = syscon_regmap_lookup_by_phandle(node,
"syscon-phy-power");
@ -702,6 +711,11 @@ static int ti_pipe3_get_sysctrl(struct ti_pipe3 *phy)
}
phy->control_dev = &control_pdev->dev;
ret = devm_add_action_or_reset(dev, ti_pipe3_put_device,
phy->control_dev);
if (ret)
return ret;
}
if (phy->mode == PIPE3_MODE_PCIE) {
@ -777,23 +791,16 @@ static int ti_pipe3_probe(struct platform_device *pdev)
struct phy_provider *phy_provider;
struct device *dev = &pdev->dev;
int ret;
const struct of_device_id *match;
struct pipe3_data *data;
const struct pipe3_data *data;
phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL);
if (!phy)
return -ENOMEM;
match = of_match_device(ti_pipe3_id_table, dev);
if (!match)
data = device_get_match_data(dev);
if (!data)
return -EINVAL;
data = (struct pipe3_data *)match->data;
if (!data) {
dev_err(dev, "no driver data\n");
return -EINVAL;
}
phy->dev = dev;
phy->mode = data->mode;
phy->dpll_map = data->dpll_map;


@ -1872,8 +1872,8 @@ static void bq27xxx_battery_update_unlocked(struct bq27xxx_device_info *di)
bool has_singe_flag = di->opts & BQ27XXX_O_ZERO;
cache.flags = bq27xxx_read(di, BQ27XXX_REG_FLAGS, has_singe_flag);
if ((cache.flags & 0xff) == 0xff)
cache.flags = -1; /* read error */
if (di->chip == BQ27000 && (cache.flags & 0xff) == 0xff)
cache.flags = -ENODEV; /* bq27000 hdq read error */
if (cache.flags >= 0) {
cache.temperature = bq27xxx_battery_read_temperature(di);
if (di->regs[BQ27XXX_REG_TTE] != INVALID_REG_ADDR)


@ -80,9 +80,11 @@ static int sy7636a_regulator_probe(struct platform_device *pdev)
if (!regmap)
return -EPROBE_DEFER;
gdp = devm_gpiod_get(pdev->dev.parent, "epd-pwr-good", GPIOD_IN);
device_set_of_node_from_dev(&pdev->dev, pdev->dev.parent);
gdp = devm_gpiod_get(&pdev->dev, "epd-pwr-good", GPIOD_IN);
if (IS_ERR(gdp)) {
dev_err(pdev->dev.parent, "Power good GPIO fault %ld\n", PTR_ERR(gdp));
dev_err(&pdev->dev, "Power good GPIO fault %ld\n", PTR_ERR(gdp));
return PTR_ERR(gdp);
}
@ -102,7 +104,6 @@ static int sy7636a_regulator_probe(struct platform_device *pdev)
}
config.dev = &pdev->dev;
config.dev->of_node = pdev->dev.parent->of_node;
config.regmap = regmap;
rdev = devm_regulator_register(&pdev->dev, &desc, &config);


@ -39,12 +39,14 @@ static bool mdt_header_valid(const struct firmware *fw)
if (phend > fw->size)
return false;
if (ehdr->e_shentsize != sizeof(struct elf32_shdr))
return false;
if (ehdr->e_shentsize || ehdr->e_shnum) {
if (ehdr->e_shentsize != sizeof(struct elf32_shdr))
return false;
shend = size_add(size_mul(sizeof(struct elf32_shdr), ehdr->e_shnum), ehdr->e_shoff);
if (shend > fw->size)
return false;
shend = size_add(size_mul(sizeof(struct elf32_shdr), ehdr->e_shnum), ehdr->e_shoff);
if (shend > fw->size)
return false;
}
return true;
}


@ -543,10 +543,10 @@ static int hvc_write(struct tty_struct *tty, const unsigned char *buf, int count
}
/*
* Racy, but harmless, kick thread if there is still pending data.
* Kick thread to flush if there's still pending data
* or to wakeup the write queue.
*/
if (hp->n_outbuf)
hvc_kick();
hvc_kick();
return written;
}


@ -1018,7 +1018,6 @@ static int sc16is7xx_config_rs485(struct uart_port *port,
static int sc16is7xx_startup(struct uart_port *port)
{
struct sc16is7xx_one *one = to_sc16is7xx_one(port, port);
struct sc16is7xx_port *s = dev_get_drvdata(port->dev);
unsigned int val;
sc16is7xx_power(port, 1);
@ -1030,16 +1029,6 @@ static int sc16is7xx_startup(struct uart_port *port)
sc16is7xx_port_write(port, SC16IS7XX_FCR_REG,
SC16IS7XX_FCR_FIFO_BIT);
/* Enable EFR */
sc16is7xx_port_write(port, SC16IS7XX_LCR_REG,
SC16IS7XX_LCR_CONF_MODE_B);
regcache_cache_bypass(s->regmap, true);
/* Enable write access to enhanced features and internal clock div */
sc16is7xx_port_write(port, SC16IS7XX_EFR_REG,
SC16IS7XX_EFR_ENABLE_BIT);
/* Enable TCR/TLR */
sc16is7xx_port_update(port, SC16IS7XX_MCR_REG,
SC16IS7XX_MCR_TCRTLR_BIT,
@ -1051,7 +1040,8 @@ static int sc16is7xx_startup(struct uart_port *port)
SC16IS7XX_TCR_RX_RESUME(24) |
SC16IS7XX_TCR_RX_HALT(48));
regcache_cache_bypass(s->regmap, false);
/* Disable TCR/TLR access */
sc16is7xx_port_update(port, SC16IS7XX_MCR_REG, SC16IS7XX_MCR_TCRTLR_BIT, 0);
/* Now, initialize the UART */
sc16is7xx_port_write(port, SC16IS7XX_LCR_REG, SC16IS7XX_LCR_WORD_LEN_8);


@ -727,7 +727,7 @@ void usb_detect_quirks(struct usb_device *udev)
udev->quirks ^= usb_detect_dynamic_quirks(udev);
if (udev->quirks)
dev_dbg(&udev->dev, "USB quirks for this device: %x\n",
dev_dbg(&udev->dev, "USB quirks for this device: 0x%x\n",
udev->quirks);
#ifdef CONFIG_USB_DEFAULT_PERSIST


@ -751,7 +751,7 @@ static int dummy_dequeue(struct usb_ep *_ep, struct usb_request *_req)
struct dummy *dum;
int retval = -EINVAL;
unsigned long flags;
struct dummy_request *req = NULL;
struct dummy_request *req = NULL, *iter;
if (!_ep || !_req)
return retval;
@ -761,25 +761,26 @@ static int dummy_dequeue(struct usb_ep *_ep, struct usb_request *_req)
if (!dum->driver)
return -ESHUTDOWN;
local_irq_save(flags);
spin_lock(&dum->lock);
list_for_each_entry(req, &ep->queue, queue) {
if (&req->req == _req) {
list_del_init(&req->queue);
_req->status = -ECONNRESET;
retval = 0;
break;
}
spin_lock_irqsave(&dum->lock, flags);
list_for_each_entry(iter, &ep->queue, queue) {
if (&iter->req != _req)
continue;
list_del_init(&iter->queue);
_req->status = -ECONNRESET;
req = iter;
retval = 0;
break;
}
spin_unlock(&dum->lock);
if (retval == 0) {
dev_dbg(udc_dev(dum),
"dequeued req %p from %s, len %d buf %p\n",
req, _ep->name, _req->length, _req->buf);
spin_unlock(&dum->lock);
usb_gadget_giveback_request(_ep, _req);
spin_lock(&dum->lock);
}
local_irq_restore(flags);
spin_unlock_irqrestore(&dum->lock, flags);
return retval;
}


@ -86,13 +86,34 @@ static u32 xhci_dbc_populate_strings(struct dbc_str_descs *strings)
return string_length;
}
static void xhci_dbc_init_ep_contexts(struct xhci_dbc *dbc)
{
struct xhci_ep_ctx *ep_ctx;
unsigned int max_burst;
dma_addr_t deq;
max_burst = DBC_CTRL_MAXBURST(readl(&dbc->regs->control));
/* Populate bulk out endpoint context: */
ep_ctx = dbc_bulkout_ctx(dbc);
deq = dbc_bulkout_enq(dbc);
ep_ctx->ep_info = 0;
ep_ctx->ep_info2 = dbc_epctx_info2(BULK_OUT_EP, 1024, max_burst);
ep_ctx->deq = cpu_to_le64(deq | dbc->ring_out->cycle_state);
/* Populate bulk in endpoint context: */
ep_ctx = dbc_bulkin_ctx(dbc);
deq = dbc_bulkin_enq(dbc);
ep_ctx->ep_info = 0;
ep_ctx->ep_info2 = dbc_epctx_info2(BULK_IN_EP, 1024, max_burst);
ep_ctx->deq = cpu_to_le64(deq | dbc->ring_in->cycle_state);
}
static void xhci_dbc_init_contexts(struct xhci_dbc *dbc, u32 string_length)
{
struct dbc_info_context *info;
struct xhci_ep_ctx *ep_ctx;
u32 dev_info;
dma_addr_t deq, dma;
unsigned int max_burst;
dma_addr_t dma;
if (!dbc)
return;
@ -106,20 +127,8 @@ static void xhci_dbc_init_contexts(struct xhci_dbc *dbc, u32 string_length)
info->serial = cpu_to_le64(dma + DBC_MAX_STRING_LENGTH * 3);
info->length = cpu_to_le32(string_length);
/* Populate bulk out endpoint context: */
ep_ctx = dbc_bulkout_ctx(dbc);
max_burst = DBC_CTRL_MAXBURST(readl(&dbc->regs->control));
deq = dbc_bulkout_enq(dbc);
ep_ctx->ep_info = 0;
ep_ctx->ep_info2 = dbc_epctx_info2(BULK_OUT_EP, 1024, max_burst);
ep_ctx->deq = cpu_to_le64(deq | dbc->ring_out->cycle_state);
/* Populate bulk in endpoint context: */
ep_ctx = dbc_bulkin_ctx(dbc);
deq = dbc_bulkin_enq(dbc);
ep_ctx->ep_info = 0;
ep_ctx->ep_info2 = dbc_epctx_info2(BULK_IN_EP, 1024, max_burst);
ep_ctx->deq = cpu_to_le64(deq | dbc->ring_in->cycle_state);
/* Populate bulk in and out endpoint contexts: */
xhci_dbc_init_ep_contexts(dbc);
/* Set DbC context and info registers: */
lo_hi_writeq(dbc->ctx->dma, &dbc->regs->dccp);
@ -421,6 +430,42 @@ dbc_alloc_ctx(struct device *dev, gfp_t flags)
return ctx;
}
static void xhci_dbc_ring_init(struct xhci_ring *ring)
{
struct xhci_segment *seg = ring->first_seg;
/* clear all trbs on ring in case of old ring */
memset(seg->trbs, 0, TRB_SEGMENT_SIZE);
/* Only event ring does not use link TRB */
if (ring->type != TYPE_EVENT) {
union xhci_trb *trb = &seg->trbs[TRBS_PER_SEGMENT - 1];
trb->link.segment_ptr = cpu_to_le64(ring->first_seg->dma);
trb->link.control = cpu_to_le32(LINK_TOGGLE | TRB_TYPE(TRB_LINK));
}
xhci_initialize_ring_info(ring, 1);
}
static int xhci_dbc_reinit_ep_rings(struct xhci_dbc *dbc)
{
struct xhci_ring *in_ring = dbc->eps[BULK_IN].ring;
struct xhci_ring *out_ring = dbc->eps[BULK_OUT].ring;
if (!in_ring || !out_ring || !dbc->ctx) {
dev_warn(dbc->dev, "Can't re-init unallocated endpoints\n");
return -ENODEV;
}
xhci_dbc_ring_init(in_ring);
xhci_dbc_ring_init(out_ring);
/* set ep context enqueue, dequeue, and cycle to initial values */
xhci_dbc_init_ep_contexts(dbc);
return 0;
}
static struct xhci_ring *
xhci_dbc_ring_alloc(struct device *dev, enum xhci_ring_type type, gfp_t flags)
{
@ -449,15 +494,10 @@ xhci_dbc_ring_alloc(struct device *dev, enum xhci_ring_type type, gfp_t flags)
seg->dma = dma;
/* Only event ring does not use link TRB */
if (type != TYPE_EVENT) {
union xhci_trb *trb = &seg->trbs[TRBS_PER_SEGMENT - 1];
trb->link.segment_ptr = cpu_to_le64(dma);
trb->link.control = cpu_to_le32(LINK_TOGGLE | TRB_TYPE(TRB_LINK));
}
INIT_LIST_HEAD(&ring->td_list);
xhci_initialize_ring_info(ring, 1);
xhci_dbc_ring_init(ring);
return ring;
dma_fail:
kfree(seg);
@ -850,7 +890,7 @@ static enum evtreturn xhci_dbc_do_handle_events(struct xhci_dbc *dbc)
dev_info(dbc->dev, "DbC cable unplugged\n");
dbc->state = DS_ENABLED;
xhci_dbc_flush_requests(dbc);
xhci_dbc_reinit_ep_rings(dbc);
return EVT_DISC;
}
@ -860,7 +900,7 @@ static enum evtreturn xhci_dbc_do_handle_events(struct xhci_dbc *dbc)
writel(portsc, &dbc->regs->portsc);
dbc->state = DS_ENABLED;
xhci_dbc_flush_requests(dbc);
xhci_dbc_reinit_ep_rings(dbc);
return EVT_DISC;
}


@ -1322,7 +1322,18 @@ static const struct usb_device_id option_ids[] = {
.driver_info = NCTRL(0) | RSVD(3) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1033, 0xff), /* Telit LE910C1-EUX (ECM) */
.driver_info = NCTRL(0) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1034, 0xff), /* Telit LE910C4-WWX (rmnet) */
.driver_info = RSVD(2) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1035, 0xff) }, /* Telit LE910C4-WWX (ECM) */
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1036, 0xff) }, /* Telit LE910C4-WWX */
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1037, 0xff), /* Telit LE910C4-WWX (rmnet) */
.driver_info = NCTRL(0) | NCTRL(1) | RSVD(4) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1038, 0xff), /* Telit LE910C4-WWX (rmnet) */
.driver_info = NCTRL(0) | RSVD(3) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x103b, 0xff), /* Telit LE910C4-WWX */
.driver_info = NCTRL(0) | NCTRL(1) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x103c, 0xff), /* Telit LE910C4-WWX */
.driver_info = NCTRL(0) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG0),
.driver_info = RSVD(0) | RSVD(1) | NCTRL(2) | RSVD(3) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG1),
@ -1369,6 +1380,12 @@ static const struct usb_device_id option_ids[] = {
.driver_info = NCTRL(0) | RSVD(1) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1075, 0xff), /* Telit FN990A (PCIe) */
.driver_info = RSVD(0) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1077, 0xff), /* Telit FN990A (rmnet + audio) */
.driver_info = NCTRL(0) | RSVD(1) | RSVD(2) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1078, 0xff), /* Telit FN990A (MBIM + audio) */
.driver_info = NCTRL(0) | RSVD(1) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1079, 0xff), /* Telit FN990A (RNDIS + audio) */
.driver_info = NCTRL(2) | RSVD(3) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1080, 0xff), /* Telit FE990A (rmnet) */
.driver_info = NCTRL(0) | RSVD(1) | RSVD(2) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1081, 0xff), /* Telit FE990A (MBIM) */


@ -2479,7 +2479,7 @@ static int fbcon_set_font(struct vc_data *vc, struct console_font *font,
unsigned charcount = font->charcount;
int w = font->width;
int h = font->height;
int size;
int size, alloc_size;
int i, csum;
u8 *new_data, *data = font->data;
int pitch = PITCH(font->width);
@ -2506,9 +2506,16 @@ static int fbcon_set_font(struct vc_data *vc, struct console_font *font,
if (fbcon_invalid_charcount(info, charcount))
return -EINVAL;
size = CALC_FONTSZ(h, pitch, charcount);
/* Check for integer overflow in font size calculation */
if (check_mul_overflow(h, pitch, &size) ||
check_mul_overflow(size, charcount, &size))
return -EINVAL;
new_data = kmalloc(FONT_EXTRA_WORDS * sizeof(int) + size, GFP_USER);
/* Check for overflow in allocation size calculation */
if (check_add_overflow(FONT_EXTRA_WORDS * sizeof(int), size, &alloc_size))
return -EINVAL;
new_data = kmalloc(alloc_size, GFP_USER);
if (!new_data)
return -ENOMEM;
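The same overflow-checked sizing pattern as a stand-alone sketch. check_mul_overflow() and check_add_overflow() are the real <linux/overflow.h> helpers (they return true on overflow, otherwise store the result through the last argument); the function name and parameters here are illustrative:

#include <linux/overflow.h>
#include <linux/slab.h>

static void *example_alloc_font(int h, int pitch, int charcount, size_t extra)
{
	int size, alloc_size;

	/* size = h * pitch * charcount, rejecting any intermediate overflow */
	if (check_mul_overflow(h, pitch, &size) ||
	    check_mul_overflow(size, charcount, &size))
		return NULL;

	/* alloc_size = extra + size, again overflow-checked */
	if (check_add_overflow(extra, size, &alloc_size))
		return NULL;

	return kmalloc(alloc_size, GFP_USER);
}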


@ -19,7 +19,6 @@
#include <linux/kernel.h>
#include <linux/major.h>
#include <linux/slab.h>
#include <linux/sysfb.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/vt.h>
@ -1795,17 +1794,6 @@ int remove_conflicting_framebuffers(struct apertures_struct *a,
do_free = true;
}
/*
* If a driver asked to unregister a platform device registered by
* sysfb, then can be assumed that this is a driver for a display
* that is set up by the system firmware and has a generic driver.
*
* Drivers for devices that don't have a generic driver will never
* ask for this, so let's assume that a real driver for the display
* was already probed and prevent sysfb from registering devices later.
*/
sysfb_disable();
mutex_lock(&registration_lock);
do_remove_conflicting_framebuffers(a, name, primary);
mutex_unlock(&registration_lock);


@ -1595,10 +1595,10 @@ static int check_inode_ref(struct extent_buffer *leaf,
while (ptr < end) {
u16 namelen;
if (unlikely(ptr + sizeof(iref) > end)) {
if (unlikely(ptr + sizeof(*iref) > end)) {
inode_ref_err(leaf, slot,
"inode ref overflow, ptr %lu end %lu inode_ref_size %zu",
ptr, end, sizeof(iref));
ptr, end, sizeof(*iref));
return -EUCLEAN;
}


@ -3047,7 +3047,7 @@ static ssize_t __fuse_copy_file_range(struct file *file_in, loff_t pos_in,
.nodeid_out = ff_out->nodeid,
.fh_out = ff_out->fh,
.off_out = pos_out,
.len = len,
.len = min_t(size_t, len, UINT_MAX & PAGE_MASK),
.flags = flags
};
struct fuse_write_out outarg;
@ -3113,6 +3113,9 @@ static ssize_t __fuse_copy_file_range(struct file *file_in, loff_t pos_in,
fc->no_copy_file_range = 1;
err = -EOPNOTSUPP;
}
if (!err && outarg.size > len)
err = -EIO;
if (err)
goto out;


@ -519,13 +519,13 @@ static void remove_inode_hugepages(struct inode *inode, loff_t lstart,
/*
* If page is mapped, it was faulted in after being
* unmapped in caller. Unmap (again) now after taking
* the fault mutex. The mutex will prevent faults
* until we finish removing the page.
*
* This race can only happen in the hole punch case.
* Getting here in a truncate operation is a bug.
* unmapped in caller or hugetlb_vmdelete_list() skips
* unmapping it after failing to grab the lock. Unmap (again)
* while holding the fault mutex. The mutex will prevent
* faults until we finish removing the page. Hold page
* lock to guarantee no concurrent migration.
*/
lock_page(page);
if (unlikely(page_mapped(page))) {
BUG_ON(truncate_op);
@ -537,8 +537,6 @@ static void remove_inode_hugepages(struct inode *inode, loff_t lstart,
(index + 1) * pages_per_huge_page(h));
i_mmap_unlock_write(mapping);
}
lock_page(page);
/*
* We must free the huge page and remove from page
* cache (remove_huge_page) BEFORE removing the


@ -548,7 +548,7 @@ static void recv_done(struct ib_cq *cq, struct ib_wc *wc)
case SMB_DIRECT_MSG_DATA_TRANSFER: {
struct smb_direct_data_transfer *data_transfer =
(struct smb_direct_data_transfer *)recvmsg->packet;
unsigned int data_length;
unsigned int data_offset, data_length;
int avail_recvmsg_count, receive_credits;
if (wc->byte_len <
@ -559,14 +559,15 @@ static void recv_done(struct ib_cq *cq, struct ib_wc *wc)
}
data_length = le32_to_cpu(data_transfer->data_length);
if (data_length) {
if (wc->byte_len < sizeof(struct smb_direct_data_transfer) +
(u64)data_length) {
put_recvmsg(t, recvmsg);
smb_direct_disconnect_rdma_connection(t);
return;
}
data_offset = le32_to_cpu(data_transfer->data_offset);
if (wc->byte_len < data_offset ||
wc->byte_len < (u64)data_offset + data_length) {
put_recvmsg(t, recvmsg);
smb_direct_disconnect_rdma_connection(t);
return;
}
if (data_length) {
if (t->full_packet_received)
recvmsg->first_segment = true;


@ -862,6 +862,8 @@ static void nfs_server_set_fsinfo(struct nfs_server *server,
if (fsinfo->xattr_support)
server->caps |= NFS_CAP_XATTR;
else
server->caps &= ~NFS_CAP_XATTR;
#endif
}


@ -270,7 +270,7 @@ ff_lseg_match_mirrors(struct pnfs_layout_segment *l1,
struct pnfs_layout_segment *l2)
{
const struct nfs4_ff_layout_segment *fl1 = FF_LAYOUT_LSEG(l1);
const struct nfs4_ff_layout_segment *fl2 = FF_LAYOUT_LSEG(l1);
const struct nfs4_ff_layout_segment *fl2 = FF_LAYOUT_LSEG(l2);
u32 i;
if (fl1->mirror_array_cnt != fl2->mirror_array_cnt)
@ -750,8 +750,11 @@ ff_layout_choose_ds_for_read(struct pnfs_layout_segment *lseg,
continue;
if (check_device &&
nfs4_test_deviceid_unavailable(&mirror->mirror_ds->id_node))
nfs4_test_deviceid_unavailable(&mirror->mirror_ds->id_node)) {
// reinitialize the error state in case this is the last iteration
ds = ERR_PTR(-EINVAL);
continue;
}
*best_idx = idx;
break;
@ -781,7 +784,7 @@ ff_layout_choose_best_ds_for_read(struct pnfs_layout_segment *lseg,
struct nfs4_pnfs_ds *ds;
ds = ff_layout_choose_valid_ds_for_read(lseg, start_idx, best_idx);
if (ds)
if (!IS_ERR(ds))
return ds;
return ff_layout_choose_any_ds_for_read(lseg, start_idx, best_idx);
}
@ -795,7 +798,7 @@ ff_layout_get_ds_for_read(struct nfs_pageio_descriptor *pgio,
ds = ff_layout_choose_best_ds_for_read(lseg, pgio->pg_mirror_idx,
best_idx);
if (ds || !pgio->pg_mirror_idx)
if (!IS_ERR(ds) || !pgio->pg_mirror_idx)
return ds;
return ff_layout_choose_best_ds_for_read(lseg, 0, best_idx);
}
@ -856,7 +859,7 @@ retry:
req->wb_nio = 0;
ds = ff_layout_get_ds_for_read(pgio, &ds_idx);
if (!ds) {
if (IS_ERR(ds)) {
if (!ff_layout_no_fallback_to_mds(pgio->pg_lseg))
goto out_mds;
pnfs_generic_pg_cleanup(pgio);
@ -1066,11 +1069,13 @@ static void ff_layout_resend_pnfs_read(struct nfs_pgio_header *hdr)
{
u32 idx = hdr->pgio_mirror_idx + 1;
u32 new_idx = 0;
struct nfs4_pnfs_ds *ds;
if (ff_layout_choose_any_ds_for_read(hdr->lseg, idx, &new_idx))
ff_layout_send_layouterror(hdr->lseg);
else
ds = ff_layout_choose_any_ds_for_read(hdr->lseg, idx, &new_idx);
if (IS_ERR(ds))
pnfs_error_mark_layout_for_return(hdr->inode, hdr->lseg);
else
ff_layout_send_layouterror(hdr->lseg);
pnfs_read_resend_pnfs(hdr, new_idx);
}
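These hunks switch the data-server lookups from a NULL-or-pointer convention to the kernel's ERR_PTR encoding, so a failure reason survives up the call chain and callers test IS_ERR() instead of '!ds'. The convention in miniature, with a hypothetical helper (ERR_PTR()/IS_ERR()/PTR_ERR() are the real <linux/err.h> API):

#include <linux/err.h>

static int *example_lookup(int *table, size_t n, size_t idx)
{
	if (idx >= n)
		return ERR_PTR(-EINVAL);	/* encoded errno, never NULL */

	return &table[idx];
}

static int example_read(int *table, size_t n, size_t idx)
{
	int *slot = example_lookup(table, n, idx);

	if (IS_ERR(slot))
		return PTR_ERR(slot);	/* propagate the encoded error */

	return *slot;
}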


@ -3904,8 +3904,9 @@ static int _nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *f
res.attr_bitmask[2] &= FATTR4_WORD2_NFS42_MASK;
}
memcpy(server->attr_bitmask, res.attr_bitmask, sizeof(server->attr_bitmask));
server->caps &= ~(NFS_CAP_ACLS | NFS_CAP_HARDLINKS |
NFS_CAP_SYMLINKS| NFS_CAP_SECURITY_LABEL);
server->caps &=
~(NFS_CAP_ACLS | NFS_CAP_HARDLINKS | NFS_CAP_SYMLINKS |
NFS_CAP_SECURITY_LABEL | NFS_CAP_FS_LOCATIONS);
server->fattr_valid = NFS_ATTR_FATTR_V4;
if (res.attr_bitmask[0] & FATTR4_WORD0_ACL &&
res.acl_bitmask & ACL4_SUPPORT_ALLOW_ACL)
@ -3969,7 +3970,6 @@ int nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle)
};
int err;
nfs_server_set_init_caps(server);
do {
err = nfs4_handle_exception(server,
_nfs4_server_capabilities(server, fhandle),


@ -1068,7 +1068,7 @@ void nilfs_sysfs_delete_device_group(struct the_nilfs *nilfs)
************************************************************************/
static ssize_t nilfs_feature_revision_show(struct kobject *kobj,
struct attribute *attr, char *buf)
struct kobj_attribute *attr, char *buf)
{
return sysfs_emit(buf, "%d.%d\n",
NILFS_CURRENT_REV, NILFS_MINOR_REV);
@ -1080,7 +1080,7 @@ static const char features_readme_str[] =
"(1) revision\n\tshow current revision of NILFS file system driver.\n";
static ssize_t nilfs_feature_README_show(struct kobject *kobj,
struct attribute *attr,
struct kobj_attribute *attr,
char *buf)
{
return sysfs_emit(buf, features_readme_str);


@ -50,16 +50,16 @@ struct nilfs_sysfs_dev_subgroups {
struct completion sg_segments_kobj_unregister;
};
#define NILFS_COMMON_ATTR_STRUCT(name) \
#define NILFS_KOBJ_ATTR_STRUCT(name) \
struct nilfs_##name##_attr { \
struct attribute attr; \
ssize_t (*show)(struct kobject *, struct attribute *, \
ssize_t (*show)(struct kobject *, struct kobj_attribute *, \
char *); \
ssize_t (*store)(struct kobject *, struct attribute *, \
ssize_t (*store)(struct kobject *, struct kobj_attribute *, \
const char *, size_t); \
}
NILFS_COMMON_ATTR_STRUCT(feature);
NILFS_KOBJ_ATTR_STRUCT(feature);
#define NILFS_DEV_ATTR_STRUCT(name) \
struct nilfs_##name##_attr { \


@ -696,6 +696,8 @@ out:
* it not only handles the fiemap for inlined files, but also deals
* with the fast symlink, cause they have no difference for extent
* mapping per se.
*
* Must be called with ip_alloc_sem semaphore held.
*/
static int ocfs2_fiemap_inline(struct inode *inode, struct buffer_head *di_bh,
struct fiemap_extent_info *fieinfo,
@ -707,6 +709,7 @@ static int ocfs2_fiemap_inline(struct inode *inode, struct buffer_head *di_bh,
u64 phys;
u32 flags = FIEMAP_EXTENT_DATA_INLINE|FIEMAP_EXTENT_LAST;
struct ocfs2_inode_info *oi = OCFS2_I(inode);
lockdep_assert_held_read(&oi->ip_alloc_sem);
di = (struct ocfs2_dinode *)di_bh->b_data;
if (ocfs2_inode_is_fast_symlink(inode))
@ -722,8 +725,11 @@ static int ocfs2_fiemap_inline(struct inode *inode, struct buffer_head *di_bh,
phys += offsetof(struct ocfs2_dinode,
id2.i_data.id_data);
/* Release the ip_alloc_sem to prevent deadlock on page fault */
up_read(&OCFS2_I(inode)->ip_alloc_sem);
ret = fiemap_fill_next_extent(fieinfo, 0, phys, id_count,
flags);
down_read(&OCFS2_I(inode)->ip_alloc_sem);
if (ret < 0)
return ret;
}
@ -792,9 +798,11 @@ int ocfs2_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
len_bytes = (u64)le16_to_cpu(rec.e_leaf_clusters) << osb->s_clustersize_bits;
phys_bytes = le64_to_cpu(rec.e_blkno) << osb->sb->s_blocksize_bits;
virt_bytes = (u64)le32_to_cpu(rec.e_cpos) << osb->s_clustersize_bits;
/* Release the ip_alloc_sem to prevent deadlock on page fault */
up_read(&OCFS2_I(inode)->ip_alloc_sem);
ret = fiemap_fill_next_extent(fieinfo, virt_bytes, phys_bytes,
len_bytes, fe_flags);
down_read(&OCFS2_I(inode)->ip_alloc_sem);
if (ret)
break;


@ -126,6 +126,10 @@ xfs_growfs_data_private(
if (delta < 0 && nagcount < 2)
return -EINVAL;
/* No work to do */
if (delta == 0)
return 0;
oagcount = mp->m_sb.sb_agcount;
/* allocate the new per-ag structures */


@ -136,6 +136,7 @@ struct af_alg_async_req {
* SG?
* @enc: Cryptographic operation to be performed when
* recvmsg is invoked.
* @write: True if we are in the middle of a write.
* @init: True if metadata has been sent.
* @len: Length of memory allocated for this data structure.
* @inflight: Non-zero when AIO requests are in flight.
@ -151,10 +152,11 @@ struct af_alg_ctx {
size_t used;
atomic_t rcvused;
bool more;
bool merge;
bool enc;
bool init;
bool more:1,
merge:1,
enc:1,
write:1,
init:1;
unsigned int len;


@ -12,12 +12,8 @@
#define CAN_SYNC_SEG 1
/* Kilobits and Megabits per second */
#define CAN_KBPS 1000UL
#define CAN_MBPS 1000000UL
/* Megahertz */
#define CAN_MHZ 1000000UL
#define CAN_CTRLMODE_TDC_MASK \
(CAN_CTRLMODE_TDC_AUTO | CAN_CTRLMODE_TDC_MANUAL)
/*
* struct can_tdc - CAN FD Transmission Delay Compensation parameters
@ -33,29 +29,43 @@
*
* This structure contains the parameters to calculate that SSP.
*
* @tdcv: Transmitter Delay Compensation Value. Distance, in time
* quanta, from when the bit is sent on the TX pin to when it is
* received on the RX pin of the transmitter. Possible options:
* -+----------- one bit ----------+-- TX pin
* |<--- Sample Point --->|
*
* 0: automatic mode. The controller dynamically measures @tdcv
* for each transmitted CAN FD frame.
* --+----------- one bit ----------+-- RX pin
* |<-------- TDCV -------->|
* |<------- TDCO ------->|
* |<----------- Secondary Sample Point ---------->|
*
* Other values: manual mode. Use the fixed provided value.
* @tdcv: Transmitter Delay Compensation Value. The time needed for
* the signal to propagate, i.e. the distance, in time quanta,
* from the start of the bit on the TX pin to when it is received
* on the RX pin. @tdcv depends on the controller modes:
*
* CAN_CTRLMODE_TDC_AUTO is set: The transceiver dynamically
* measures @tdcv for each transmitted CAN FD frame and the
* value provided here should be ignored.
*
* CAN_CTRLMODE_TDC_MANUAL is set: use the fixed provided @tdcv
* value.
*
* N.B. CAN_CTRLMODE_TDC_AUTO and CAN_CTRLMODE_TDC_MANUAL are
* mutually exclusive. Only one can be set at a time. If both
* CAN_CTRLMODE_TDC_AUTO and CAN_CTRLMODE_TDC_MANUAL are unset,
* TDC is disabled and all the values of this structure should be
* ignored.
*
* @tdco: Transmitter Delay Compensation Offset. Offset value, in time
* quanta, defining the distance between the start of the bit
* reception on the RX pin of the transceiver and the SSP
* position such that SSP = @tdcv + @tdco.
*
* If @tdco is zero, then TDC is disabled and both @tdcv and
* @tdcf should be ignored.
*
* @tdcf: Transmitter Delay Compensation Filter window. Defines the
* minimum value for the SSP position in time quanta. If SSP is
* less than @tdcf, then no delay compensations occur and the
* normal sampling point is used instead. The feature is enabled
* if and only if @tdcv is set to zero (automatic mode) and @tdcf
* is configured to a value greater than @tdco.
* minimum value for the SSP position in time quanta. If the SSP
* position is less than @tdcf, then no delay compensations occur
* and the normal sampling point is used instead. The feature is
* enabled if and only if @tdcv is set to zero (automatic mode)
* and @tdcf is configured to a value greater than @tdco.
*/
struct can_tdc {
u32 tdcv;
@ -67,19 +77,32 @@ struct can_tdc {
* struct can_tdc_const - CAN hardware-dependent constant for
* Transmission Delay Compensation
*
* @tdcv_max: Transmitter Delay Compensation Value maximum value.
* Should be set to zero if the controller does not support
* manual mode for tdcv.
* @tdcv_min: Transmitter Delay Compensation Value minimum value. If
* the controller does not support manual mode for tdcv
* (c.f. flag CAN_CTRLMODE_TDC_MANUAL) then this value is
* ignored.
* @tdcv_max: Transmitter Delay Compensation Value maximum value. If
* the controller does not support manual mode for tdcv
* (c.f. flag CAN_CTRLMODE_TDC_MANUAL) then this value is
* ignored.
*
* @tdco_min: Transmitter Delay Compensation Offset minimum value.
* @tdco_max: Transmitter Delay Compensation Offset maximum value.
* Should not be zero. If the controller does not support TDC,
* then the pointer to this structure should be NULL.
*
* @tdcf_min: Transmitter Delay Compensation Filter window minimum
* value. If @tdcf_max is zero, this value is ignored.
* @tdcf_max: Transmitter Delay Compensation Filter window maximum
* value. Should be set to zero if the controller does not
* support this feature.
*/
struct can_tdc_const {
u32 tdcv_min;
u32 tdcv_max;
u32 tdco_min;
u32 tdco_max;
u32 tdcf_min;
u32 tdcf_max;
};


@ -21,6 +21,7 @@
#include <linux/can/length.h>
#include <linux/can/netlink.h>
#include <linux/can/skb.h>
#include <linux/ethtool.h>
#include <linux/netdevice.h>
/*
@ -96,6 +97,10 @@ struct can_priv {
#endif
};
static inline bool can_tdc_is_enabled(const struct can_priv *priv)
{
return !!(priv->ctrlmode & CAN_CTRLMODE_TDC_MASK);
}
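A hedged sketch of how a driver's bit-timing setup might act on these control modes. can_tdc_is_enabled(), the CAN_CTRLMODE_TDC_* flags and the struct can_tdc fields are the ones introduced above; the hw_write_*() register accessors are hypothetical stubs:

#include <linux/can/dev.h>

/* hypothetical hardware accessors, stubbed out for the sketch */
static inline void hw_write_tdcv(u32 v) { }
static inline void hw_write_tdco(u32 v) { }
static inline void hw_write_tdcf(u32 v) { }

static void example_apply_tdc(struct can_priv *priv, const struct can_tdc *tdc)
{
	if (!can_tdc_is_enabled(priv))
		return;			/* neither TDC_AUTO nor TDC_MANUAL is set */

	if (priv->ctrlmode & CAN_CTRLMODE_TDC_MANUAL)
		hw_write_tdcv(tdc->tdcv);	/* fixed, user-provided TDCV */
	/* under CAN_CTRLMODE_TDC_AUTO the controller measures TDCV itself */

	hw_write_tdco(tdc->tdco);	/* SSP = TDCV + TDCO */
	hw_write_tdcf(tdc->tdcf);	/* minimum SSP position */
}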
/* helper to define static CAN controller features at device creation time */
static inline void can_set_static_ctrlmode(struct net_device *dev,
@ -128,6 +133,9 @@ struct can_priv *safe_candev_priv(struct net_device *dev);
int open_candev(struct net_device *dev);
void close_candev(struct net_device *dev);
int can_change_mtu(struct net_device *dev, int new_mtu);
int can_eth_ioctl_hwts(struct net_device *netdev, struct ifreq *ifr, int cmd);
int can_ethtool_op_get_ts_info_hwts(struct net_device *dev,
struct ethtool_ts_info *info);
int register_candev(struct net_device *dev);
void unregister_candev(struct net_device *dev);


@ -14,23 +14,42 @@
#define KASAN_ABI_VERSION 5
/*
* Clang 22 added preprocessor macros to match GCC, in hopes of eventually
* dropping __has_feature support for sanitizers:
* https://github.com/llvm/llvm-project/commit/568c23bbd3303518c5056d7f03444dae4fdc8a9c
* Create these macros for older versions of clang so that it is easy to clean
* up once the minimum supported version of LLVM for building the kernel always
* creates these macros.
*
* Note: Checking __has_feature(*_sanitizer) is only true if the feature is
* enabled. Therefore it is not required to additionally check defined(CONFIG_*)
* to avoid adding redundant attributes in other configurations.
*/
#if __has_feature(address_sanitizer) || __has_feature(hwaddress_sanitizer)
/* Emulate GCC's __SANITIZE_ADDRESS__ flag */
#if __has_feature(address_sanitizer) && !defined(__SANITIZE_ADDRESS__)
#define __SANITIZE_ADDRESS__
#endif
#if __has_feature(hwaddress_sanitizer) && !defined(__SANITIZE_HWADDRESS__)
#define __SANITIZE_HWADDRESS__
#endif
#if __has_feature(thread_sanitizer) && !defined(__SANITIZE_THREAD__)
#define __SANITIZE_THREAD__
#endif
/*
* Treat __SANITIZE_HWADDRESS__ the same as __SANITIZE_ADDRESS__ in the kernel.
*/
#ifdef __SANITIZE_HWADDRESS__
#define __SANITIZE_ADDRESS__
#endif
#ifdef __SANITIZE_ADDRESS__
#define __no_sanitize_address \
__attribute__((no_sanitize("address", "hwaddress")))
#else
#define __no_sanitize_address
#endif
#if __has_feature(thread_sanitizer)
/* emulate gcc's __SANITIZE_THREAD__ flag */
#define __SANITIZE_THREAD__
#ifdef __SANITIZE_THREAD__
#define __no_sanitize_thread \
__attribute__((no_sanitize("thread")))
#else


@ -329,7 +329,46 @@ extern int irq_force_affinity(unsigned int irq, const struct cpumask *cpumask);
extern int irq_can_set_affinity(unsigned int irq);
extern int irq_select_affinity(unsigned int irq);
extern int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m);
extern int __irq_apply_affinity_hint(unsigned int irq, const struct cpumask *m,
bool setaffinity);
/**
* irq_update_affinity_hint - Update the affinity hint
* @irq: Interrupt to update
* @m: cpumask pointer (NULL to clear the hint)
*
* Updates the affinity hint, but does not change the affinity of the interrupt.
*/
static inline int
irq_update_affinity_hint(unsigned int irq, const struct cpumask *m)
{
return __irq_apply_affinity_hint(irq, m, false);
}
/**
* irq_set_affinity_and_hint - Update the affinity hint and apply the provided
* cpumask to the interrupt
* @irq: Interrupt to update
* @m: cpumask pointer (NULL to clear the hint)
*
* Updates the affinity hint and if @m is not NULL it applies it as the
* affinity of that interrupt.
*/
static inline int
irq_set_affinity_and_hint(unsigned int irq, const struct cpumask *m)
{
return __irq_apply_affinity_hint(irq, m, true);
}
/*
* Deprecated. Use irq_update_affinity_hint() or irq_set_affinity_and_hint()
* instead.
*/
static inline int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
{
return irq_set_affinity_and_hint(irq, m);
}
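From a driver's perspective the split lets it publish a preferred CPU per queue interrupt without overriding user-space (irqbalance) placement, and clear the hint on teardown. A sketch under that assumption; the helper names and IRQ bookkeeping are hypothetical:

#include <linux/cpumask.h>
#include <linux/interrupt.h>

static void example_set_queue_hints(const int *queue_irqs, int nr_queues)
{
	int i;

	for (i = 0; i < nr_queues; i++)
		irq_update_affinity_hint(queue_irqs[i],
					 cpumask_of(i % num_online_cpus()));
}

static void example_clear_queue_hints(const int *queue_irqs, int nr_queues)
{
	int i;

	for (i = 0; i < nr_queues; i++)
		irq_update_affinity_hint(queue_irqs[i], NULL);	/* drop hint */
}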
extern int irq_update_affinity_desc(unsigned int irq,
struct irq_affinity_desc *affinity);
@ -361,6 +400,18 @@ static inline int irq_can_set_affinity(unsigned int irq)
static inline int irq_select_affinity(unsigned int irq) { return 0; }
static inline int irq_update_affinity_hint(unsigned int irq,
const struct cpumask *m)
{
return -EINVAL;
}
static inline int irq_set_affinity_and_hint(unsigned int irq,
const struct cpumask *m)
{
return -EINVAL;
}
static inline int irq_set_affinity_hint(unsigned int irq,
const struct cpumask *m)
{

include/linux/pgalloc.h (new file)

@ -0,0 +1,29 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_PGALLOC_H
#define _LINUX_PGALLOC_H
#include <linux/pgtable.h>
#include <asm/pgalloc.h>
/*
* {pgd,p4d}_populate_kernel() are defined as macros to allow
* compile-time optimization based on the configured page table levels.
* Without this, linking may fail because callers (e.g., KASAN) may rely
* on calls to these functions being optimized away when passing symbols
* that exist only for certain page table levels.
*/
#define pgd_populate_kernel(addr, pgd, p4d) \
do { \
pgd_populate(&init_mm, pgd, p4d); \
if (ARCH_PAGE_TABLE_SYNC_MASK & PGTBL_PGD_MODIFIED) \
arch_sync_kernel_mappings(addr, addr); \
} while (0)
#define p4d_populate_kernel(addr, p4d, pud) \
do { \
p4d_populate(&init_mm, p4d, pud); \
if (ARCH_PAGE_TABLE_SYNC_MASK & PGTBL_P4D_MODIFIED) \
arch_sync_kernel_mappings(addr, addr); \
} while (0)
#endif /* _LINUX_PGALLOC_H */
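A hedged sketch of a caller, assuming a helper that lazily allocates a missing P4D table for a kernel mapping; pgd_populate_kernel() both installs the entry and, when the architecture sets PGTBL_PGD_MODIFIED in ARCH_PAGE_TABLE_SYNC_MASK, propagates the change via arch_sync_kernel_mappings(). example_p4d_alloc() itself is hypothetical:

#include <linux/gfp.h>
#include <linux/pgalloc.h>

static p4d_t *example_p4d_alloc(unsigned long addr, pgd_t *pgd)
{
	if (pgd_none(*pgd)) {
		p4d_t *p4d = (p4d_t *)get_zeroed_page(GFP_KERNEL);

		if (!p4d)
			return NULL;

		pgd_populate_kernel(addr, pgd, p4d);	/* install + sync */
	}

	return p4d_offset(pgd, addr);
}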


@ -1382,8 +1382,8 @@ static inline int pmd_protnone(pmd_t pmd)
/*
* Architectures can set this mask to a combination of PGTBL_P?D_MODIFIED values
* and let generic vmalloc and ioremap code know when arch_sync_kernel_mappings()
* needs to be called.
* and let generic vmalloc, ioremap and page table update code know when
* arch_sync_kernel_mappings() needs to be called.
*/
#ifndef ARCH_PAGE_TABLE_SYNC_MASK
#define ARCH_PAGE_TABLE_SYNC_MASK 0
@ -1522,10 +1522,11 @@ static inline bool arch_has_pfn_modify_check(void)
/*
* Page Table Modification bits for pgtbl_mod_mask.
*
* These are used by the p?d_alloc_track*() set of functions an in the generic
* vmalloc/ioremap code to track at which page-table levels entries have been
* modified. Based on that the code can better decide when vmalloc and ioremap
* mapping changes need to be synchronized to other page-tables in the system.
* These are used by the p?d_alloc_track*() and p*d_populate_kernel()
* functions in the generic vmalloc, ioremap and page table update code
* to track at which page-table levels entries have been modified.
* Based on that the code can better decide when page table changes need
* to be synchronized to other page-tables in the system.
*/
#define __PGTBL_PGD_MODIFIED 0
#define __PGTBL_P4D_MODIFIED 1


@ -349,6 +349,8 @@ struct bpf_local_storage;
* @sk_txtime_deadline_mode: set deadline mode for SO_TXTIME
* @sk_txtime_report_errors: set report errors mode for SO_TXTIME
* @sk_txtime_unused: unused txtime flags
* @sk_owner: reference to the real owner of the socket that calls
* sock_lock_init_class_and_name().
*/
struct sock {
/*
@ -537,6 +539,10 @@ struct sock {
struct bpf_local_storage __rcu *sk_bpf_storage;
#endif
struct rcu_head sk_rcu;
#if IS_ENABLED(CONFIG_PROVE_LOCKING) && IS_ENABLED(CONFIG_MODULES)
struct module *sk_owner;
#endif
};
enum sk_pacing {
@ -1662,6 +1668,35 @@ static inline void sock_release_ownership(struct sock *sk)
}
}
#if IS_ENABLED(CONFIG_PROVE_LOCKING) && IS_ENABLED(CONFIG_MODULES)
static inline void sk_owner_set(struct sock *sk, struct module *owner)
{
__module_get(owner);
sk->sk_owner = owner;
}
static inline void sk_owner_clear(struct sock *sk)
{
sk->sk_owner = NULL;
}
static inline void sk_owner_put(struct sock *sk)
{
module_put(sk->sk_owner);
}
#else
static inline void sk_owner_set(struct sock *sk, struct module *owner)
{
}
static inline void sk_owner_clear(struct sock *sk)
{
}
static inline void sk_owner_put(struct sock *sk)
{
}
#endif
/*
* Macro so as to not evaluate some arguments when
* lockdep is not enabled.
@ -1671,13 +1706,14 @@ static inline void sock_release_ownership(struct sock *sk)
*/
#define sock_lock_init_class_and_name(sk, sname, skey, name, key) \
do { \
sk_owner_set(sk, THIS_MODULE); \
sk->sk_lock.owned = 0; \
init_waitqueue_head(&sk->sk_lock.wq); \
spin_lock_init(&(sk)->sk_lock.slock); \
debug_check_no_locks_freed((void *)&(sk)->sk_lock, \
sizeof((sk)->sk_lock)); \
sizeof((sk)->sk_lock)); \
lockdep_set_class_and_name(&(sk)->sk_lock.slock, \
(skey), (sname)); \
(skey), (sname)); \
lockdep_init_map(&(sk)->sk_lock.dep_map, (name), (key), 0); \
} while (0)


@ -101,6 +101,8 @@ struct can_ctrlmode {
#define CAN_CTRLMODE_PRESUME_ACK 0x40 /* Ignore missing CAN ACKs */
#define CAN_CTRLMODE_FD_NON_ISO 0x80 /* CAN FD in non-ISO mode */
#define CAN_CTRLMODE_CC_LEN8_DLC 0x100 /* Classic CAN DLC option */
#define CAN_CTRLMODE_TDC_AUTO 0x200 /* CAN transceiver automatically calculates TDCV */
#define CAN_CTRLMODE_TDC_MANUAL 0x400 /* TDCV is manually set up by user */
/*
* CAN device statistics


@ -5056,6 +5056,10 @@ static int process_timer_func(struct bpf_verifier_env *env, int regno,
verbose(env, "verifier bug. Two map pointers in a timer helper\n");
return -EFAULT;
}
if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
verbose(env, "bpf_timer cannot be used for PREEMPT_RT.\n");
return -EOPNOTSUPP;
}
meta->map_uid = reg->map_uid;
meta->map_ptr = map;
return 0;


@ -122,8 +122,31 @@ DEFINE_PERCPU_RWSEM(cgroup_threadgroup_rwsem);
* of concurrent destructions. Use a separate workqueue so that cgroup
* destruction work items don't end up filling up max_active of system_wq
* which may lead to deadlock.
*
* A cgroup destruction should enqueue work sequentially to:
* cgroup_offline_wq: use for css offline work
* cgroup_release_wq: use for css release work
* cgroup_free_wq: use for free work
*
* Rationale for using separate workqueues:
* The cgroup root free work may depend on completion of other css offline
* operations. If all tasks were enqueued to a single workqueue, this could
* create a deadlock scenario where:
* - Free work waits for other css offline work to complete.
* - But other css offline work is queued after free work in the same queue.
*
* Example deadlock scenario with single workqueue (cgroup_destroy_wq):
* 1. umount net_prio
* 2. net_prio root destruction enqueues work to cgroup_destroy_wq (CPUx)
* 3. perf_event CSS A offline enqueues work to same cgroup_destroy_wq (CPUx)
* 4. net_prio cgroup_destroy_root->cgroup_lock_and_drain_offline.
* 5. net_prio root destruction blocks waiting for perf_event CSS A offline,
* which can never complete as it's behind in the same queue and
* workqueue's max_active is 1.
*/
static struct workqueue_struct *cgroup_destroy_wq;
static struct workqueue_struct *cgroup_offline_wq;
static struct workqueue_struct *cgroup_release_wq;
static struct workqueue_struct *cgroup_free_wq;
/* generate an array of cgroup subsystem pointers */
#define SUBSYS(_x) [_x ## _cgrp_id] = &_x ## _cgrp_subsys,
@ -5263,7 +5286,7 @@ static void css_release_work_fn(struct work_struct *work)
mutex_unlock(&cgroup_mutex);
INIT_RCU_WORK(&css->destroy_rwork, css_free_rwork_fn);
queue_rcu_work(cgroup_destroy_wq, &css->destroy_rwork);
queue_rcu_work(cgroup_free_wq, &css->destroy_rwork);
}
static void css_release(struct percpu_ref *ref)
@ -5272,7 +5295,7 @@ static void css_release(struct percpu_ref *ref)
container_of(ref, struct cgroup_subsys_state, refcnt);
INIT_WORK(&css->destroy_work, css_release_work_fn);
queue_work(cgroup_destroy_wq, &css->destroy_work);
queue_work(cgroup_release_wq, &css->destroy_work);
}
static void init_and_link_css(struct cgroup_subsys_state *css,
@ -5394,7 +5417,7 @@ err_list_del:
err_free_css:
list_del_rcu(&css->rstat_css_node);
INIT_RCU_WORK(&css->destroy_rwork, css_free_rwork_fn);
queue_rcu_work(cgroup_destroy_wq, &css->destroy_rwork);
queue_rcu_work(cgroup_free_wq, &css->destroy_rwork);
return ERR_PTR(err);
}
@ -5631,7 +5654,7 @@ static void css_killed_ref_fn(struct percpu_ref *ref)
if (atomic_dec_and_test(&css->online_cnt)) {
INIT_WORK(&css->destroy_work, css_killed_work_fn);
queue_work(cgroup_destroy_wq, &css->destroy_work);
queue_work(cgroup_offline_wq, &css->destroy_work);
}
}
@ -6008,8 +6031,14 @@ static int __init cgroup_wq_init(void)
* We would prefer to do this in cgroup_init() above, but that
* is called before init_workqueues(): so leave this until after.
*/
cgroup_destroy_wq = alloc_workqueue("cgroup_destroy", 0, 1);
BUG_ON(!cgroup_destroy_wq);
cgroup_offline_wq = alloc_workqueue("cgroup_offline", 0, 1);
BUG_ON(!cgroup_offline_wq);
cgroup_release_wq = alloc_workqueue("cgroup_release", 0, 1);
BUG_ON(!cgroup_release_wq);
cgroup_free_wq = alloc_workqueue("cgroup_free", 0, 1);
BUG_ON(!cgroup_free_wq);
return 0;
}
core_initcall(cgroup_wq_init);


@ -502,7 +502,8 @@ int irq_force_affinity(unsigned int irq, const struct cpumask *cpumask)
}
EXPORT_SYMBOL_GPL(irq_force_affinity);
int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
int __irq_apply_affinity_hint(unsigned int irq, const struct cpumask *m,
bool setaffinity)
{
unsigned long flags;
struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
@ -511,12 +512,11 @@ int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
return -EINVAL;
desc->affinity_hint = m;
irq_put_desc_unlock(desc, flags);
/* set the initial affinity to prevent every interrupt being on CPU0 */
if (m)
if (m && setaffinity)
__irq_set_affinity(irq, m, false);
return 0;
}
EXPORT_SYMBOL_GPL(irq_set_affinity_hint);
EXPORT_SYMBOL_GPL(__irq_apply_affinity_hint);
static void irq_affinity_notify(struct work_struct *work)
{


@ -631,17 +631,12 @@ static inline ktime_t hrtimer_update_base(struct hrtimer_cpu_base *base)
/*
* Is the high resolution mode active ?
*/
static inline int __hrtimer_hres_active(struct hrtimer_cpu_base *cpu_base)
static inline int hrtimer_hres_active(struct hrtimer_cpu_base *cpu_base)
{
return IS_ENABLED(CONFIG_HIGH_RES_TIMERS) ?
cpu_base->hres_active : 0;
}
static inline int hrtimer_hres_active(void)
{
return __hrtimer_hres_active(this_cpu_ptr(&hrtimer_bases));
}
static void __hrtimer_reprogram(struct hrtimer_cpu_base *cpu_base,
struct hrtimer *next_timer,
ktime_t expires_next)
@ -665,7 +660,7 @@ static void __hrtimer_reprogram(struct hrtimer_cpu_base *cpu_base,
* set. So we'd effectively block all timers until the T2 event
* fires.
*/
if (!__hrtimer_hres_active(cpu_base) || cpu_base->hang_detected)
if (!hrtimer_hres_active(cpu_base) || cpu_base->hang_detected)
return;
tick_program_event(expires_next, 1);
@ -775,13 +770,13 @@ static void retrigger_next_event(void *arg)
* of the next expiring timer is enough. The return from the SMP
* function call will take care of the reprogramming in case the
* CPU was in a NOHZ idle sleep.
*
* In periodic low resolution mode, the next softirq expiration
* must also be updated.
*/
if (!__hrtimer_hres_active(base) && !tick_nohz_active)
return;
raw_spin_lock(&base->lock);
hrtimer_update_base(base);
if (__hrtimer_hres_active(base))
if (hrtimer_hres_active(base))
hrtimer_force_reprogram(base, 0);
else
hrtimer_update_next_event(base);
@ -938,7 +933,7 @@ void clock_was_set(unsigned int bases)
cpumask_var_t mask;
int cpu;
if (!__hrtimer_hres_active(cpu_base) && !tick_nohz_active)
if (!hrtimer_hres_active(cpu_base) && !tick_nohz_active)
goto out_timerfd;
if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) {
@ -1489,7 +1484,7 @@ u64 hrtimer_get_next_event(void)
raw_spin_lock_irqsave(&cpu_base->lock, flags);
if (!__hrtimer_hres_active(cpu_base))
if (!hrtimer_hres_active(cpu_base))
expires = __hrtimer_get_next_event(cpu_base, HRTIMER_ACTIVE_ALL);
raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
@ -1512,7 +1507,7 @@ u64 hrtimer_next_event_without(const struct hrtimer *exclude)
raw_spin_lock_irqsave(&cpu_base->lock, flags);
if (__hrtimer_hres_active(cpu_base)) {
if (hrtimer_hres_active(cpu_base)) {
unsigned int active;
if (!cpu_base->softirq_activated) {
@ -1873,25 +1868,7 @@ retry:
tick_program_event(expires_next, 1);
pr_warn_once("hrtimer: interrupt took %llu ns\n", ktime_to_ns(delta));
}
/* called with interrupts disabled */
static inline void __hrtimer_peek_ahead_timers(void)
{
struct tick_device *td;
if (!hrtimer_hres_active())
return;
td = this_cpu_ptr(&tick_cpu_device);
if (td && td->evtdev)
hrtimer_interrupt(td->evtdev);
}
#else /* CONFIG_HIGH_RES_TIMERS */
static inline void __hrtimer_peek_ahead_timers(void) { }
#endif /* !CONFIG_HIGH_RES_TIMERS */
#endif /* !CONFIG_HIGH_RES_TIMERS */
/*
* Called from run_local_timers in hardirq context every jiffy
@ -1902,7 +1879,7 @@ void hrtimer_run_queues(void)
unsigned long flags;
ktime_t now;
if (__hrtimer_hres_active(cpu_base))
if (hrtimer_hres_active(cpu_base))
return;
/*
@ -2252,11 +2229,6 @@ int hrtimers_cpu_dying(unsigned int dying_cpu)
&new_base->clock_base[i]);
}
/*
* The migration might have changed the first expiring softirq
* timer on this CPU. Update it.
*/
__hrtimer_get_next_event(new_base, HRTIMER_ACTIVE_SOFT);
/* Tell the other CPU to retrigger the next event */
smp_call_function_single(ncpu, retrigger_next_event, NULL, 0);


@ -7243,7 +7243,7 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
entry = ring_buffer_event_data(event);
entry->ip = _THIS_IP_;
len = __copy_from_user_inatomic(&entry->buf, ubuf, cnt);
len = copy_from_user_nofault(&entry->buf, ubuf, cnt);
if (len) {
memcpy(&entry->buf, FAULTED_STR, FAULTED_SIZE);
cnt = FAULTED_SIZE;
@ -7318,7 +7318,7 @@ tracing_mark_raw_write(struct file *filp, const char __user *ubuf,
entry = ring_buffer_event_data(event);
len = __copy_from_user_inatomic(&entry->id, ubuf, cnt);
len = copy_from_user_nofault(&entry->id, ubuf, cnt);
if (len) {
entry->id = -1;
memcpy(&entry->buf, FAULTED_STR, FAULTED_SIZE);


@ -239,6 +239,10 @@ static int dyn_event_open(struct inode *inode, struct file *file)
{
int ret;
ret = security_locked_down(LOCKDOWN_TRACEFS);
if (ret)
return ret;
ret = tracing_check_open_get_tr(NULL);
if (ret)
return ret;

Some files were not shown because too many files have changed in this diff.