Linux 5.13.13

-----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCgAdFiEE4n5dijQDou9mhzu83qZv95d3LNwFAmEnjhYACgkQ3qZv95d3
 LNyQig/8DC76FNZ+LJbh5zv4mpvS301Vuk4xqxmGiDnPpbglYYCdE9nl4GcgPIoM
 zNvMLeVXWnMrq8K84KskqdGJoPhpH197yU/lbirWIfGORHOvUoJCKSxXA1T6yFWX
 GCf34YxtHaxPH3Olt1FIm/VwM/KsS+Rp9/Am85kc2Ramv0/f3ZIkKrQ/UVkno8IE
 gC4M/HKOafox+GJ7lLlU/4FL7dVCTtnFsaDv7eUczrPE8WBtV1I9pef/EvHiD+j7
 DA6wcCh/mtgOijwnHMRnlHlDQKH52pIVBZks6WwjxpbEmDtVOvLHUPw0QZSy6ZYw
 4w0jwU3rkvnEw5sy/z7s1y3R5NBEUx1UT0Ze7fqn7qRKEIGdb1g9NLl3gYy5w1Ew
 yApRuhNMY0sKPPahtyz8vFUTxiMDSpg8AfaXZOAJArCqXw3acgNmLQLcd9nLHCQq
 DiB9+PIwoux/Q4LLC/0pMnF7K82EecWZ159fTAYwtxZ+EpJWF5BYwODh39jZM6Qz
 kreOkSPMufu3xYSLT7Vjt5ol+yjYvMPKrGkNsw+VemZ6rxgiH/RGUiGcSdc1v7eG
 LnpObqLB3bTRSGgVTqIOloRiEQHJVsuD/5eYd4BKp09n0K2f4W5jJOSANpNoB+C0
 lIry4ZCQr/Ap28vnjoyahFcMfiHiKaAO3Wqfgb8v4odNxujcXo0=
 =Pxf/
 -----END PGP SIGNATURE-----

Merge tag 'v5.13.13' into v5.13/standard/base

Linux 5.13.13

# gpg: Signature made Thu 26 Aug 2021 08:50:30 AM EDT
# gpg:                using RSA key E27E5D8A3403A2EF66873BBCDEA66FF797772CDC
# gpg: Can't check signature: No public key
Committed by Bruce Ashfield, 2021-09-01 10:36:03 -04:00
commit 4f74221580
129 changed files with 1196 additions and 777 deletions

View File

@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 5
PATCHLEVEL = 13
SUBLEVEL = 12
SUBLEVEL = 13
EXTRAVERSION =
NAME = Opossums on Parade

View File

@ -582,7 +582,7 @@
status = "okay";
pinctrl-names = "default";
pinctrl-0 = <&i2c0_pins>;
clock-frequency = <400000>;
clock-frequency = <100000>;
tps65218: tps65218@24 {
reg = <0x24>;

View File

@ -755,14 +755,14 @@
status = "disabled";
};
vica: intc@10140000 {
vica: interrupt-controller@10140000 {
compatible = "arm,versatile-vic";
interrupt-controller;
#interrupt-cells = <1>;
reg = <0x10140000 0x20>;
};
vicb: intc@10140020 {
vicb: interrupt-controller@10140020 {
compatible = "arm,versatile-vic";
interrupt-controller;
#interrupt-cells = <1>;

View File

@ -181,6 +181,8 @@ archprepare:
# We use MRPROPER_FILES and CLEAN_FILES now
archclean:
$(Q)$(MAKE) $(clean)=$(boot)
$(Q)$(MAKE) $(clean)=arch/arm64/kernel/vdso
$(Q)$(MAKE) $(clean)=arch/arm64/kernel/vdso32
ifeq ($(KBUILD_EXTMOD),)
# We need to generate vdso-offsets.h before compiling certain files in kernel/.

View File

@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2015, LGE Inc. All rights reserved.
* Copyright (c) 2016, The Linux Foundation. All rights reserved.
* Copyright (c) 2021, Petr Vorel <petr.vorel@gmail.com>
*/
/dts-v1/;
@ -9,6 +10,9 @@
#include "pm8994.dtsi"
#include "pmi8994.dtsi"
/* cont_splash_mem has different memory mapping */
/delete-node/ &cont_splash_mem;
/ {
model = "LG Nexus 5X";
compatible = "lg,bullhead", "qcom,msm8992";
@ -17,6 +21,9 @@
qcom,board-id = <0xb64 0>;
qcom,pmic-id = <0x10009 0x1000A 0x0 0x0>;
/* Bullhead firmware doesn't support PSCI */
/delete-node/ psci;
aliases {
serial0 = &blsp1_uart2;
};
@ -38,6 +45,11 @@
ftrace-size = <0x10000>;
pmsg-size = <0x20000>;
};
cont_splash_mem: memory@3400000 {
reg = <0 0x03400000 0 0x1200000>;
no-map;
};
};
};

View File

@ -1,12 +1,16 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2015, Huawei Inc. All rights reserved.
* Copyright (c) 2016, The Linux Foundation. All rights reserved.
* Copyright (c) 2021, Petr Vorel <petr.vorel@gmail.com>
*/
/dts-v1/;
#include "msm8994.dtsi"
/* Angler's firmware does not report where the memory is allocated */
/delete-node/ &cont_splash_mem;
/ {
model = "Huawei Nexus 6P";
compatible = "huawei,angler", "qcom,msm8994";

View File

@ -69,7 +69,7 @@
};
rmtfs_upper_guard: memory@f5d01000 {
no-map;
reg = <0 0xf5d01000 0 0x2000>;
reg = <0 0xf5d01000 0 0x1000>;
};
/*
@ -78,7 +78,7 @@
*/
removed_region: memory@88f00000 {
no-map;
reg = <0 0x88f00000 0 0x200000>;
reg = <0 0x88f00000 0 0x1c00000>;
};
ramoops: ramoops@ac300000 {

View File

@ -700,7 +700,7 @@
left_spkr: wsa8810-left{
compatible = "sdw10217211000";
reg = <0 3>;
powerdown-gpios = <&wcdgpio 2 GPIO_ACTIVE_HIGH>;
powerdown-gpios = <&wcdgpio 1 GPIO_ACTIVE_HIGH>;
#thermal-sensor-cells = <0>;
sound-name-prefix = "SpkrLeft";
#sound-dai-cells = <0>;
@ -708,7 +708,7 @@
right_spkr: wsa8810-right{
compatible = "sdw10217211000";
powerdown-gpios = <&wcdgpio 3 GPIO_ACTIVE_HIGH>;
powerdown-gpios = <&wcdgpio 2 GPIO_ACTIVE_HIGH>;
reg = <0 4>;
#thermal-sensor-cells = <0>;
sound-name-prefix = "SpkrRight";

View File

@ -4,9 +4,50 @@
#include <asm/bug.h>
#include <asm/book3s/32/mmu-hash.h>
#include <asm/mmu.h>
#include <asm/synch.h>
#ifndef __ASSEMBLY__
static __always_inline bool kuep_is_disabled(void)
{
return !IS_ENABLED(CONFIG_PPC_KUEP);
}
static inline void kuep_lock(void)
{
if (kuep_is_disabled())
return;
update_user_segments(mfsr(0) | SR_NX);
/*
* This isync() shouldn't be necessary, as the kernel is not expected to
* run any userspace instruction soon after the segment update, but
* hash-based cores (at least the G3) seem to exhibit random behaviour
* when the 'isync' is missing. 603 cores don't show this behaviour, so
* skip the 'isync' there and save several CPU cycles.
*/
if (mmu_has_feature(MMU_FTR_HPTE_TABLE))
isync(); /* Context sync required after mtsr() */
}
static inline void kuep_unlock(void)
{
if (kuep_is_disabled())
return;
update_user_segments(mfsr(0) & ~SR_NX);
/*
* This isync() shouldn't be necessary, as an 'rfi' will soon be
* executed to return to userspace, but hash-based cores (at least the
* G3) seem to exhibit random behaviour when the 'isync' is missing.
* 603 cores don't show this behaviour, so skip the 'isync' there and
* save several CPU cycles.
*/
if (mmu_has_feature(MMU_FTR_HPTE_TABLE))
isync(); /* Context sync required after mtsr() */
}
#ifdef CONFIG_PPC_KUAP
#include <linux/sched.h>

View File

@ -102,6 +102,33 @@ extern s32 patch__hash_page_B, patch__hash_page_C;
extern s32 patch__flush_hash_A0, patch__flush_hash_A1, patch__flush_hash_A2;
extern s32 patch__flush_hash_B;
#include <asm/reg.h>
#include <asm/task_size_32.h>
#define UPDATE_TWO_USER_SEGMENTS(n) do { \
if (TASK_SIZE > ((n) << 28)) \
mtsr(val1, (n) << 28); \
if (TASK_SIZE > (((n) + 1) << 28)) \
mtsr(val2, ((n) + 1) << 28); \
val1 = (val1 + 0x222) & 0xf0ffffff; \
val2 = (val2 + 0x222) & 0xf0ffffff; \
} while (0)
static __always_inline void update_user_segments(u32 val)
{
int val1 = val;
int val2 = (val + 0x111) & 0xf0ffffff;
UPDATE_TWO_USER_SEGMENTS(0);
UPDATE_TWO_USER_SEGMENTS(2);
UPDATE_TWO_USER_SEGMENTS(4);
UPDATE_TWO_USER_SEGMENTS(6);
UPDATE_TWO_USER_SEGMENTS(8);
UPDATE_TWO_USER_SEGMENTS(10);
UPDATE_TWO_USER_SEGMENTS(12);
UPDATE_TWO_USER_SEGMENTS(14);
}
#endif /* !__ASSEMBLY__ */
/* We happily ignore the smaller BATs on 601, we don't actually use
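For reference, the UPDATE_TWO_USER_SEGMENTS() unrolling above is equivalent to the straight loop below. This is an illustration only, not part of the patch; mtsr(), TASK_SIZE and the VSID layout come from the headers this hunk already includes. Each of the 16 segment registers maps 256 MB of user address space; the VSID in the low 24 bits steps by 0x111 per segment, and the 0xf0ffffff mask confines the carry to the VSID field while preserving the flag bits (SR_NX among them).

static __always_inline void update_user_segments_loop(u32 val)
{
	int i;

	for (i = 0; i < 16; i++) {
		if (TASK_SIZE > ((u32)i << 28))
			mtsr(val, (u32)i << 28);
		val = (val + 0x111) & 0xf0ffffff;
	}
}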

View File

@ -46,10 +46,7 @@ void setup_kuep(bool disabled);
static inline void setup_kuep(bool disabled) { }
#endif /* CONFIG_PPC_KUEP */
#if defined(CONFIG_PPC_KUEP) && defined(CONFIG_PPC_BOOK3S_32)
void kuep_lock(void);
void kuep_unlock(void);
#else
#ifndef CONFIG_PPC_BOOK3S_32
static inline void kuep_lock(void) { }
static inline void kuep_unlock(void) { }
#endif

View File

@ -10,3 +10,4 @@ obj-y += mmu.o mmu_context.o
obj-$(CONFIG_PPC_BOOK3S_603) += nohash_low.o
obj-$(CONFIG_PPC_BOOK3S_604) += hash_low.o tlb.o
obj-$(CONFIG_PPC_KUEP) += kuep.o
obj-$(CONFIG_PPC_KUAP) += kuap.o

View File

@ -0,0 +1,11 @@
// SPDX-License-Identifier: GPL-2.0-or-later
#include <asm/kup.h>
void __init setup_kuap(bool disabled)
{
pr_info("Activating Kernel Userspace Access Protection\n");
if (disabled)
pr_warn("KUAP cannot be disabled yet on 6xx when compiled in\n");
}

View File

@ -1,40 +1,11 @@
// SPDX-License-Identifier: GPL-2.0-or-later
#include <asm/kup.h>
#include <asm/reg.h>
#include <asm/task_size_32.h>
#include <asm/mmu.h>
#define KUEP_UPDATE_TWO_USER_SEGMENTS(n) do { \
if (TASK_SIZE > ((n) << 28)) \
mtsr(val1, (n) << 28); \
if (TASK_SIZE > (((n) + 1) << 28)) \
mtsr(val2, ((n) + 1) << 28); \
val1 = (val1 + 0x222) & 0xf0ffffff; \
val2 = (val2 + 0x222) & 0xf0ffffff; \
} while (0)
static __always_inline void kuep_update(u32 val)
void __init setup_kuep(bool disabled)
{
int val1 = val;
int val2 = (val + 0x111) & 0xf0ffffff;
pr_info("Activating Kernel Userspace Execution Prevention\n");
KUEP_UPDATE_TWO_USER_SEGMENTS(0);
KUEP_UPDATE_TWO_USER_SEGMENTS(2);
KUEP_UPDATE_TWO_USER_SEGMENTS(4);
KUEP_UPDATE_TWO_USER_SEGMENTS(6);
KUEP_UPDATE_TWO_USER_SEGMENTS(8);
KUEP_UPDATE_TWO_USER_SEGMENTS(10);
KUEP_UPDATE_TWO_USER_SEGMENTS(12);
KUEP_UPDATE_TWO_USER_SEGMENTS(14);
}
void kuep_lock(void)
{
kuep_update(mfsr(0) | SR_NX);
}
void kuep_unlock(void)
{
kuep_update(mfsr(0) & ~SR_NX);
if (disabled)
pr_warn("KUEP cannot be disabled yet on 6xx when compiled in\n");
}

View File

@ -445,26 +445,6 @@ void __init print_system_hash_info(void)
pr_info("Hash_mask = 0x%lx\n", Hash_mask);
}
#ifdef CONFIG_PPC_KUEP
void __init setup_kuep(bool disabled)
{
pr_info("Activating Kernel Userspace Execution Prevention\n");
if (disabled)
pr_warn("KUEP cannot be disabled yet on 6xx when compiled in\n");
}
#endif
#ifdef CONFIG_PPC_KUAP
void __init setup_kuap(bool disabled)
{
pr_info("Activating Kernel Userspace Access Protection\n");
if (disabled)
pr_warn("KUAP cannot be disabled yet on 6xx when compiled in\n");
}
#endif
void __init early_init_mmu(void)
{
}

View File

@ -230,8 +230,8 @@ static void __init init_resources(void)
}
/* Clean-up any unused pre-allocated resources */
mem_res_sz = (num_resources - res_idx + 1) * sizeof(*mem_res);
memblock_free(__pa(mem_res), mem_res_sz);
if (res_idx >= 0)
memblock_free(__pa(mem_res), (res_idx + 1) * sizeof(*mem_res));
return;
error:
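For clarity on the arithmetic above: the pre-allocated mem_res entries are handed out from the end of the array, so when the loop finishes, the unused ones are mem_res[0] through mem_res[res_idx], and res_idx is -1 when everything was consumed (hence the new guard). A hypothetical helper, with example numbers in the comment, not part of the patch:

static size_t unused_mem_res_bytes(int res_idx, size_t entry_size)
{
	/* e.g. 8 entries allocated and res_idx == 2 left over: free 3
	 * entries. The old (num_resources - res_idx + 1) expression
	 * freed 7 in that case, overlapping entries that had just been
	 * registered, and it ran even when res_idx was -1.
	 */
	return (size_t)(res_idx + 1) * entry_size;
}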

View File

@ -560,9 +560,12 @@ static void zpci_cleanup_bus_resources(struct zpci_dev *zdev)
int pcibios_add_device(struct pci_dev *pdev)
{
struct zpci_dev *zdev = to_zpci(pdev);
struct resource *res;
int i;
/* The pdev has a reference to the zdev via its bus */
zpci_zdev_get(zdev);
if (pdev->is_physfn)
pdev->no_vf_scan = 1;
@ -582,7 +585,10 @@ int pcibios_add_device(struct pci_dev *pdev)
void pcibios_release_device(struct pci_dev *pdev)
{
struct zpci_dev *zdev = to_zpci(pdev);
zpci_unmap_resources(pdev);
zpci_zdev_put(zdev);
}
int pcibios_enable_device(struct pci_dev *pdev, int mask)

View File

@ -22,6 +22,11 @@ static inline void zpci_zdev_put(struct zpci_dev *zdev)
kref_put(&zdev->kref, zpci_release_device);
}
static inline void zpci_zdev_get(struct zpci_dev *zdev)
{
kref_get(&zdev->kref);
}
int zpci_alloc_domain(int domain);
void zpci_free_domain(int domain);
int zpci_setup_bus_resources(struct zpci_dev *zdev,

View File

@ -2489,13 +2489,15 @@ void perf_clear_dirty_counters(void)
return;
for_each_set_bit(i, cpuc->dirty, X86_PMC_IDX_MAX) {
/* Metrics and fake events don't have corresponding HW counters. */
if (is_metric_idx(i) || (i == INTEL_PMC_IDX_FIXED_VLBR))
continue;
else if (i >= INTEL_PMC_IDX_FIXED)
if (i >= INTEL_PMC_IDX_FIXED) {
/* Metrics and fake events don't have corresponding HW counters. */
if ((i - INTEL_PMC_IDX_FIXED) >= hybrid(cpuc->pmu, num_counters_fixed))
continue;
wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + (i - INTEL_PMC_IDX_FIXED), 0);
else
} else {
wrmsrl(x86_pmu_event_addr(i), 0);
}
}
bitmap_zero(cpuc->dirty, X86_PMC_IDX_MAX);
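The restructured loop above encodes the counter-index layout: indices below INTEL_PMC_IDX_FIXED are general-purpose counters cleared through x86_pmu_event_addr(), while indices from INTEL_PMC_IDX_FIXED upwards are fixed counters at consecutive MSR_ARCH_PERFMON_FIXED_CTR0 addresses, minus the metric and VLBR pseudo-indices that have no backing MSR. A hypothetical helper making the mapping explicit, illustration only:

static unsigned int dirty_counter_msr(int i)
{
	/* Assumes i already passed the "has a HW counter" check that
	 * the loop above performs for fixed indices.
	 */
	if (i >= INTEL_PMC_IDX_FIXED)
		return MSR_ARCH_PERFMON_FIXED_CTR0 + (i - INTEL_PMC_IDX_FIXED);
	return x86_pmu_event_addr(i);
}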

View File

@ -596,13 +596,13 @@ static void kyber_insert_requests(struct blk_mq_hw_ctx *hctx,
struct list_head *head = &kcq->rq_list[sched_domain];
spin_lock(&kcq->lock);
trace_block_rq_insert(rq);
if (at_head)
list_move(&rq->queuelist, head);
else
list_move_tail(&rq->queuelist, head);
sbitmap_set_bit(&khd->kcq_map[sched_domain],
rq->mq_ctx->index_hw[hctx->type]);
trace_block_rq_insert(rq);
spin_unlock(&kcq->lock);
}
}

View File

@ -3097,8 +3097,10 @@ static int sysc_probe(struct platform_device *pdev)
return error;
error = sysc_check_active_timer(ddata);
if (error == -EBUSY)
if (error == -ENXIO)
ddata->reserved = true;
else if (error)
return error;
error = sysc_get_clocks(ddata);
if (error)

View File

@ -974,6 +974,6 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
hws[IMX6QDL_CLK_PLL3_USB_OTG]->clk);
}
imx_register_uart_clocks(1);
imx_register_uart_clocks(2);
}
CLK_OF_DECLARE(imx6q, "fsl,imx6q-ccm", imx6q_clocks_init);

View File

@ -357,27 +357,43 @@ static int gdsc_init(struct gdsc *sc)
if (on < 0)
return on;
/*
* Votable GDSCs can be ON due to Vote from other masters.
* If a Votable GDSC is ON, make sure we have a Vote.
*/
if ((sc->flags & VOTABLE) && on)
if (on) {
/* The regulator must be on, sync the kernel state */
if (sc->rsupply) {
ret = regulator_enable(sc->rsupply);
if (ret < 0)
return ret;
}
/*
* Votable GDSCs can be ON due to Vote from other masters.
* If a Votable GDSC is ON, make sure we have a Vote.
*/
if (sc->flags & VOTABLE) {
ret = regmap_update_bits(sc->regmap, sc->gdscr,
SW_COLLAPSE_MASK, val);
if (ret)
return ret;
}
/* Turn on HW trigger mode if supported */
if (sc->flags & HW_CTRL) {
ret = gdsc_hwctrl(sc, true);
if (ret < 0)
return ret;
}
/*
* Make sure the retain bit is set if the GDSC is already on,
* otherwise we end up turning off the GDSC and destroying all
* the register contents that we thought we were saving.
*/
if (sc->flags & RETAIN_FF_ENABLE)
gdsc_retain_ff_on(sc);
} else if (sc->flags & ALWAYS_ON) {
/* If ALWAYS_ON GDSCs are not ON, turn them ON */
gdsc_enable(&sc->pd);
/*
* Make sure the retain bit is set if the GDSC is already on, otherwise
* we end up turning off the GDSC and destroying all the register
* contents that we thought we were saving.
*/
if ((sc->flags & RETAIN_FF_ENABLE) && on)
gdsc_retain_ff_on(sc);
/* If ALWAYS_ON GDSCs are not ON, turn them ON */
if (sc->flags & ALWAYS_ON) {
if (!on)
gdsc_enable(&sc->pd);
on = true;
sc->pd.flags |= GENPD_FLAG_ALWAYS_ON;
}
if (on || (sc->pwrsts & PWRSTS_RET))
@ -385,6 +401,8 @@ static int gdsc_init(struct gdsc *sc)
else
gdsc_clear_mem_on(sc);
if (sc->flags & ALWAYS_ON)
sc->pd.flags |= GENPD_FLAG_ALWAYS_ON;
if (!sc->pd.power_off)
sc->pd.power_off = gdsc_disable;
if (!sc->pd.power_on)

View File

@ -104,7 +104,11 @@ struct armada_37xx_dvfs {
};
static struct armada_37xx_dvfs armada_37xx_dvfs[] = {
{.cpu_freq_max = 1200*1000*1000, .divider = {1, 2, 4, 6} },
/*
* The cpufreq scaling for the 1.2 GHz variant of the SoC is currently
* unstable because we do not know how to configure it properly.
*/
/* {.cpu_freq_max = 1200*1000*1000, .divider = {1, 2, 4, 6} }, */
{.cpu_freq_max = 1000*1000*1000, .divider = {1, 2, 4, 5} },
{.cpu_freq_max = 800*1000*1000, .divider = {1, 2, 3, 4} },
{.cpu_freq_max = 600*1000*1000, .divider = {2, 4, 5, 6} },

View File

@ -134,7 +134,7 @@ static int scmi_cpufreq_init(struct cpufreq_policy *policy)
}
if (!zalloc_cpumask_var(&opp_shared_cpus, GFP_KERNEL))
ret = -ENOMEM;
return -ENOMEM;
/* Obtain CPUs that share SCMI performance controls */
ret = scmi_get_sharing_cpus(cpu_dev, policy->cpus);

View File

@ -67,8 +67,12 @@ static struct dma_chan *of_dma_router_xlate(struct of_phandle_args *dma_spec,
return NULL;
ofdma_target = of_dma_find_controller(&dma_spec_target);
if (!ofdma_target)
return NULL;
if (!ofdma_target) {
ofdma->dma_router->route_free(ofdma->dma_router->dev,
route_data);
chan = ERR_PTR(-EPROBE_DEFER);
goto err;
}
chan = ofdma_target->of_dma_xlate(&dma_spec_target, ofdma_target);
if (IS_ERR_OR_NULL(chan)) {
@ -89,6 +93,7 @@ static struct dma_chan *of_dma_router_xlate(struct of_phandle_args *dma_spec,
}
}
err:
/*
* Need to put the node back since the ofdma->of_dma_route_allocate
* has taken it for generating the new, translated dma_spec

View File

@ -855,8 +855,8 @@ static int usb_dmac_probe(struct platform_device *pdev)
error:
of_dma_controller_free(pdev->dev.of_node);
pm_runtime_put(&pdev->dev);
error_pm:
pm_runtime_put(&pdev->dev);
pm_runtime_disable(&pdev->dev);
return ret;
}

View File

@ -394,6 +394,7 @@ struct xilinx_dma_tx_descriptor {
* @genlock: Support genlock mode
* @err: Channel has errors
* @idle: Check for channel idle
* @terminating: Check for channel being synchronized by user
* @tasklet: Cleanup work after irq
* @config: Device configuration info
* @flush_on_fsync: Flush on Frame sync
@ -431,6 +432,7 @@ struct xilinx_dma_chan {
bool genlock;
bool err;
bool idle;
bool terminating;
struct tasklet_struct tasklet;
struct xilinx_vdma_config config;
bool flush_on_fsync;
@ -1049,6 +1051,13 @@ static void xilinx_dma_chan_desc_cleanup(struct xilinx_dma_chan *chan)
/* Run any dependencies, then free the descriptor */
dma_run_dependencies(&desc->async_tx);
xilinx_dma_free_tx_descriptor(chan, desc);
/*
* While we ran a callback, the user called a terminate function,
* which takes care of cleaning up any remaining descriptors
*/
if (chan->terminating)
break;
}
spin_unlock_irqrestore(&chan->lock, flags);
@ -1965,6 +1974,8 @@ static dma_cookie_t xilinx_dma_tx_submit(struct dma_async_tx_descriptor *tx)
if (desc->cyclic)
chan->cyclic = true;
chan->terminating = false;
spin_unlock_irqrestore(&chan->lock, flags);
return cookie;
@ -2436,6 +2447,7 @@ static int xilinx_dma_terminate_all(struct dma_chan *dchan)
xilinx_dma_chan_reset(chan);
/* Remove and free all of the descriptors in the lists */
chan->terminating = true;
xilinx_dma_free_descriptors(chan);
chan->idle = true;

View File

@ -1293,6 +1293,16 @@ static bool is_raven_kicker(struct amdgpu_device *adev)
return false;
}
static bool check_if_enlarge_doorbell_range(struct amdgpu_device *adev)
{
if ((adev->asic_type == CHIP_RENOIR) &&
(adev->gfx.me_fw_version >= 0x000000a5) &&
(adev->gfx.me_feature_version >= 52))
return true;
else
return false;
}
static void gfx_v9_0_check_if_need_gfxoff(struct amdgpu_device *adev)
{
if (gfx_v9_0_should_disable_gfxoff(adev->pdev))
@ -3673,7 +3683,16 @@ static int gfx_v9_0_kiq_init_register(struct amdgpu_ring *ring)
if (ring->use_doorbell) {
WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_LOWER,
(adev->doorbell_index.kiq * 2) << 2);
WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_UPPER,
/* If GC has entered CGPG, ringing a doorbell beyond the first page
* doesn't wake GC up. Enlarge CP_MEC_DOORBELL_RANGE_UPPER to work
* around this issue; the change has to stay aligned with the firmware
* update.
*/
if (check_if_enlarge_doorbell_range(adev))
WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_UPPER,
(adev->doorbell.size - 4));
else
WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_UPPER,
(adev->doorbell_index.userqueue_end * 2) << 2);
}

View File

@ -66,9 +66,11 @@ int rn_get_active_display_cnt_wa(
for (i = 0; i < context->stream_count; i++) {
const struct dc_stream_state *stream = context->streams[i];
/* Extend the WA to DP for Linux */
if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A ||
stream->signal == SIGNAL_TYPE_DVI_SINGLE_LINK ||
stream->signal == SIGNAL_TYPE_DVI_DUAL_LINK)
stream->signal == SIGNAL_TYPE_DVI_DUAL_LINK ||
stream->signal == SIGNAL_TYPE_DISPLAY_PORT)
tmds_present = true;
}

View File

@ -464,7 +464,7 @@ void optc2_lock_doublebuffer_enable(struct timing_generator *optc)
REG_UPDATE_2(OTG_GLOBAL_CONTROL1,
MASTER_UPDATE_LOCK_DB_X,
h_blank_start - 200 - 1,
(h_blank_start - 200 - 1) / optc1->opp_count,
MASTER_UPDATE_LOCK_DB_Y,
v_blank_start - 1);
}

View File

@ -1622,106 +1622,12 @@ static void dcn301_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *b
dml_init_instance(&dc->dml, &dcn3_01_soc, &dcn3_01_ip, DML_PROJECT_DCN30);
}
static void calculate_wm_set_for_vlevel(
int vlevel,
struct wm_range_table_entry *table_entry,
struct dcn_watermarks *wm_set,
struct display_mode_lib *dml,
display_e2e_pipe_params_st *pipes,
int pipe_cnt)
{
double dram_clock_change_latency_cached = dml->soc.dram_clock_change_latency_us;
ASSERT(vlevel < dml->soc.num_states);
/* only pipe 0 is read for voltage and dcf/soc clocks */
pipes[0].clks_cfg.voltage = vlevel;
pipes[0].clks_cfg.dcfclk_mhz = dml->soc.clock_limits[vlevel].dcfclk_mhz;
pipes[0].clks_cfg.socclk_mhz = dml->soc.clock_limits[vlevel].socclk_mhz;
dml->soc.dram_clock_change_latency_us = table_entry->pstate_latency_us;
dml->soc.sr_exit_time_us = table_entry->sr_exit_time_us;
dml->soc.sr_enter_plus_exit_time_us = table_entry->sr_enter_plus_exit_time_us;
wm_set->urgent_ns = get_wm_urgent(dml, pipes, pipe_cnt) * 1000;
wm_set->cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(dml, pipes, pipe_cnt) * 1000;
wm_set->cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(dml, pipes, pipe_cnt) * 1000;
wm_set->cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(dml, pipes, pipe_cnt) * 1000;
wm_set->pte_meta_urgent_ns = get_wm_memory_trip(dml, pipes, pipe_cnt) * 1000;
wm_set->frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(dml, pipes, pipe_cnt) * 1000;
wm_set->frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(dml, pipes, pipe_cnt) * 1000;
wm_set->urgent_latency_ns = get_urgent_latency(dml, pipes, pipe_cnt) * 1000;
dml->soc.dram_clock_change_latency_us = dram_clock_change_latency_cached;
}
static void dcn301_calculate_wm_and_dlg(
struct dc *dc, struct dc_state *context,
display_e2e_pipe_params_st *pipes,
int pipe_cnt,
int vlevel_req)
{
int i, pipe_idx;
int vlevel, vlevel_max;
struct wm_range_table_entry *table_entry;
struct clk_bw_params *bw_params = dc->clk_mgr->bw_params;
ASSERT(bw_params);
vlevel_max = bw_params->clk_table.num_entries - 1;
/* WM Set D */
table_entry = &bw_params->wm_table.entries[WM_D];
if (table_entry->wm_type == WM_TYPE_RETRAINING)
vlevel = 0;
else
vlevel = vlevel_max;
calculate_wm_set_for_vlevel(vlevel, table_entry, &context->bw_ctx.bw.dcn.watermarks.d,
&context->bw_ctx.dml, pipes, pipe_cnt);
/* WM Set C */
table_entry = &bw_params->wm_table.entries[WM_C];
vlevel = min(max(vlevel_req, 2), vlevel_max);
calculate_wm_set_for_vlevel(vlevel, table_entry, &context->bw_ctx.bw.dcn.watermarks.c,
&context->bw_ctx.dml, pipes, pipe_cnt);
/* WM Set B */
table_entry = &bw_params->wm_table.entries[WM_B];
vlevel = min(max(vlevel_req, 1), vlevel_max);
calculate_wm_set_for_vlevel(vlevel, table_entry, &context->bw_ctx.bw.dcn.watermarks.b,
&context->bw_ctx.dml, pipes, pipe_cnt);
/* WM Set A */
table_entry = &bw_params->wm_table.entries[WM_A];
vlevel = min(vlevel_req, vlevel_max);
calculate_wm_set_for_vlevel(vlevel, table_entry, &context->bw_ctx.bw.dcn.watermarks.a,
&context->bw_ctx.dml, pipes, pipe_cnt);
for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
if (!context->res_ctx.pipe_ctx[i].stream)
continue;
pipes[pipe_idx].clks_cfg.dispclk_mhz = get_dispclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt);
pipes[pipe_idx].clks_cfg.dppclk_mhz = get_dppclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
if (dc->config.forced_clocks) {
pipes[pipe_idx].clks_cfg.dispclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dispclk_mhz;
pipes[pipe_idx].clks_cfg.dppclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dppclk_mhz;
}
if (dc->debug.min_disp_clk_khz > pipes[pipe_idx].clks_cfg.dispclk_mhz * 1000)
pipes[pipe_idx].clks_cfg.dispclk_mhz = dc->debug.min_disp_clk_khz / 1000.0;
if (dc->debug.min_dpp_clk_khz > pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000)
pipes[pipe_idx].clks_cfg.dppclk_mhz = dc->debug.min_dpp_clk_khz / 1000.0;
pipe_idx++;
}
dcn20_calculate_dlg_params(dc, context, pipes, pipe_cnt, vlevel);
}
static struct resource_funcs dcn301_res_pool_funcs = {
.destroy = dcn301_destroy_resource_pool,
.link_enc_create = dcn301_link_encoder_create,
.panel_cntl_create = dcn301_panel_cntl_create,
.validate_bandwidth = dcn30_validate_bandwidth,
.calculate_wm_and_dlg = dcn301_calculate_wm_and_dlg,
.calculate_wm_and_dlg = dcn30_calculate_wm_and_dlg,
.update_soc_for_wm_a = dcn30_update_soc_for_wm_a,
.populate_dml_pipes = dcn30_populate_dml_pipes_from_context,
.acquire_idle_pipe_for_layer = dcn20_acquire_idle_pipe_for_layer,

View File

@ -5910,13 +5910,13 @@ void intel_display_power_suspend_late(struct drm_i915_private *i915)
{
if (DISPLAY_VER(i915) >= 11 || IS_GEN9_LP(i915)) {
bxt_enable_dc9(i915);
/* Tweaked Wa_14010685332:icp,jsp,mcc */
if (INTEL_PCH_TYPE(i915) >= PCH_ICP && INTEL_PCH_TYPE(i915) <= PCH_MCC)
intel_de_rmw(i915, SOUTH_CHICKEN1,
SBCLK_RUN_REFCLK_DIS, SBCLK_RUN_REFCLK_DIS);
} else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
hsw_enable_pc8(i915);
}
/* Tweaked Wa_14010685332:cnp,icp,jsp,mcc,tgp,adp */
if (INTEL_PCH_TYPE(i915) >= PCH_CNP && INTEL_PCH_TYPE(i915) < PCH_DG1)
intel_de_rmw(i915, SOUTH_CHICKEN1, SBCLK_RUN_REFCLK_DIS, SBCLK_RUN_REFCLK_DIS);
}
void intel_display_power_resume_early(struct drm_i915_private *i915)
@ -5924,13 +5924,13 @@ void intel_display_power_resume_early(struct drm_i915_private *i915)
if (DISPLAY_VER(i915) >= 11 || IS_GEN9_LP(i915)) {
gen9_sanitize_dc_state(i915);
bxt_disable_dc9(i915);
/* Tweaked Wa_14010685332:icp,jsp,mcc */
if (INTEL_PCH_TYPE(i915) >= PCH_ICP && INTEL_PCH_TYPE(i915) <= PCH_MCC)
intel_de_rmw(i915, SOUTH_CHICKEN1, SBCLK_RUN_REFCLK_DIS, 0);
} else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
hsw_disable_pc8(i915);
}
/* Tweaked Wa_14010685332:cnp,icp,jsp,mcc,tgp,adp */
if (INTEL_PCH_TYPE(i915) >= PCH_CNP && INTEL_PCH_TYPE(i915) < PCH_DG1)
intel_de_rmw(i915, SOUTH_CHICKEN1, SBCLK_RUN_REFCLK_DIS, 0);
}
void intel_display_power_suspend(struct drm_i915_private *i915)

View File

@ -2421,6 +2421,8 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
u32 iir;
enum pipe pipe;
drm_WARN_ON_ONCE(&dev_priv->drm, !HAS_DISPLAY(dev_priv));
if (master_ctl & GEN8_DE_MISC_IRQ) {
iir = intel_uncore_read(&dev_priv->uncore, GEN8_DE_MISC_IIR);
if (iir) {
@ -3040,32 +3042,13 @@ static void valleyview_irq_reset(struct drm_i915_private *dev_priv)
spin_unlock_irq(&dev_priv->irq_lock);
}
static void cnp_display_clock_wa(struct drm_i915_private *dev_priv)
{
struct intel_uncore *uncore = &dev_priv->uncore;
/*
* Wa_14010685332:cnp/cmp,tgp,adp
* TODO: Clarify which platforms this applies to
* TODO: Figure out if this workaround can be applied in the s0ix suspend/resume handlers as
* on earlier platforms and whether the workaround is also needed for runtime suspend/resume
*/
if (INTEL_PCH_TYPE(dev_priv) == PCH_CNP ||
(INTEL_PCH_TYPE(dev_priv) >= PCH_TGP && INTEL_PCH_TYPE(dev_priv) < PCH_DG1)) {
intel_uncore_rmw(uncore, SOUTH_CHICKEN1, SBCLK_RUN_REFCLK_DIS,
SBCLK_RUN_REFCLK_DIS);
intel_uncore_rmw(uncore, SOUTH_CHICKEN1, SBCLK_RUN_REFCLK_DIS, 0);
}
}
static void gen8_irq_reset(struct drm_i915_private *dev_priv)
static void gen8_display_irq_reset(struct drm_i915_private *dev_priv)
{
struct intel_uncore *uncore = &dev_priv->uncore;
enum pipe pipe;
gen8_master_intr_disable(dev_priv->uncore.regs);
gen8_gt_irq_reset(&dev_priv->gt);
if (!HAS_DISPLAY(dev_priv))
return;
intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff);
intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);
@ -3077,12 +3060,21 @@ static void gen8_irq_reset(struct drm_i915_private *dev_priv)
GEN3_IRQ_RESET(uncore, GEN8_DE_PORT_);
GEN3_IRQ_RESET(uncore, GEN8_DE_MISC_);
}
static void gen8_irq_reset(struct drm_i915_private *dev_priv)
{
struct intel_uncore *uncore = &dev_priv->uncore;
gen8_master_intr_disable(dev_priv->uncore.regs);
gen8_gt_irq_reset(&dev_priv->gt);
gen8_display_irq_reset(dev_priv);
GEN3_IRQ_RESET(uncore, GEN8_PCU_);
if (HAS_PCH_SPLIT(dev_priv))
ibx_irq_reset(dev_priv);
cnp_display_clock_wa(dev_priv);
}
static void gen11_display_irq_reset(struct drm_i915_private *dev_priv)
@ -3092,6 +3084,9 @@ static void gen11_display_irq_reset(struct drm_i915_private *dev_priv)
u32 trans_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) |
BIT(TRANSCODER_C) | BIT(TRANSCODER_D);
if (!HAS_DISPLAY(dev_priv))
return;
intel_uncore_write(uncore, GEN11_DISPLAY_INT_CTL, 0);
if (DISPLAY_VER(dev_priv) >= 12) {
@ -3123,8 +3118,6 @@ static void gen11_display_irq_reset(struct drm_i915_private *dev_priv)
if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
GEN3_IRQ_RESET(uncore, SDE);
cnp_display_clock_wa(dev_priv);
}
static void gen11_irq_reset(struct drm_i915_private *dev_priv)
@ -3714,6 +3707,9 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
BIT(TRANSCODER_C) | BIT(TRANSCODER_D);
enum pipe pipe;
if (!HAS_DISPLAY(dev_priv))
return;
if (DISPLAY_VER(dev_priv) <= 10)
de_misc_masked |= GEN8_DE_MISC_GSE;
@ -3797,6 +3793,16 @@ static void gen8_irq_postinstall(struct drm_i915_private *dev_priv)
gen8_master_intr_enable(dev_priv->uncore.regs);
}
static void gen11_de_irq_postinstall(struct drm_i915_private *dev_priv)
{
if (!HAS_DISPLAY(dev_priv))
return;
gen8_de_irq_postinstall(dev_priv);
intel_uncore_write(&dev_priv->uncore, GEN11_DISPLAY_INT_CTL,
GEN11_DISPLAY_IRQ_ENABLE);
}
static void gen11_irq_postinstall(struct drm_i915_private *dev_priv)
{
@ -3807,12 +3813,10 @@ static void gen11_irq_postinstall(struct drm_i915_private *dev_priv)
icp_irq_postinstall(dev_priv);
gen11_gt_irq_postinstall(&dev_priv->gt);
gen8_de_irq_postinstall(dev_priv);
gen11_de_irq_postinstall(dev_priv);
GEN3_IRQ_INIT(uncore, GEN11_GU_MISC_, ~gu_misc_masked, gu_misc_masked);
intel_uncore_write(&dev_priv->uncore, GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE);
if (HAS_MASTER_UNIT_IRQ(dev_priv)) {
dg1_master_intr_enable(uncore->regs);
intel_uncore_posting_read(&dev_priv->uncore, DG1_MSTR_UNIT_INTR);

View File

@ -134,6 +134,8 @@ static int mtk_disp_color_probe(struct platform_device *pdev)
static int mtk_disp_color_remove(struct platform_device *pdev)
{
component_del(&pdev->dev, &mtk_disp_color_component_ops);
return 0;
}

View File

@ -424,6 +424,8 @@ static int mtk_disp_ovl_probe(struct platform_device *pdev)
static int mtk_disp_ovl_remove(struct platform_device *pdev)
{
component_del(&pdev->dev, &mtk_disp_ovl_component_ops);
return 0;
}

View File

@ -34,6 +34,7 @@
#define DISP_AAL_EN 0x0000
#define DISP_AAL_SIZE 0x0030
#define DISP_AAL_OUTPUT_SIZE 0x04d8
#define DISP_DITHER_EN 0x0000
#define DITHER_EN BIT(0)
@ -197,6 +198,7 @@ static void mtk_aal_config(struct device *dev, unsigned int w,
struct mtk_ddp_comp_dev *priv = dev_get_drvdata(dev);
mtk_ddp_write(cmdq_pkt, w << 16 | h, &priv->cmdq_reg, priv->regs, DISP_AAL_SIZE);
mtk_ddp_write(cmdq_pkt, w << 16 | h, &priv->cmdq_reg, priv->regs, DISP_AAL_OUTPUT_SIZE);
}
static void mtk_aal_gamma_set(struct device *dev, struct drm_crtc_state *state)

View File

@ -768,6 +768,7 @@ static void iommu_dma_free_noncontiguous(struct device *dev, size_t size,
__iommu_dma_unmap(dev, sgt->sgl->dma_address, size);
__iommu_dma_free_pages(sh->pages, PAGE_ALIGN(size) >> PAGE_SHIFT);
sg_free_table(&sh->sgt);
kfree(sh);
}
#endif /* CONFIG_DMA_REMAP */

View File

@ -511,7 +511,7 @@ void intel_pasid_tear_down_entry(struct intel_iommu *iommu, struct device *dev,
u32 pasid, bool fault_ignore)
{
struct pasid_entry *pte;
u16 did;
u16 did, pgtt;
pte = intel_pasid_get_entry(dev, pasid);
if (WARN_ON(!pte))
@ -521,13 +521,19 @@ void intel_pasid_tear_down_entry(struct intel_iommu *iommu, struct device *dev,
return;
did = pasid_get_domain_id(pte);
pgtt = pasid_pte_get_pgtt(pte);
intel_pasid_clear_entry(dev, pasid, fault_ignore);
if (!ecap_coherent(iommu->ecap))
clflush_cache_range(pte, sizeof(*pte));
pasid_cache_invalidation_with_pasid(iommu, did, pasid);
qi_flush_piotlb(iommu, did, pasid, 0, -1, 0);
if (pgtt == PASID_ENTRY_PGTT_PT || pgtt == PASID_ENTRY_PGTT_FL_ONLY)
qi_flush_piotlb(iommu, did, pasid, 0, -1, 0);
else
iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH);
/* Device IOTLB doesn't need to be flushed in caching mode. */
if (!cap_caching_mode(iommu->cap))

View File

@ -99,6 +99,12 @@ static inline bool pasid_pte_is_present(struct pasid_entry *pte)
return READ_ONCE(pte->val[0]) & PASID_PTE_PRESENT;
}
/* Get PGTT field of a PASID table entry */
static inline u16 pasid_pte_get_pgtt(struct pasid_entry *pte)
{
return (u16)((READ_ONCE(pte->val[0]) >> 6) & 0x7);
}
extern unsigned int intel_pasid_max_id;
int intel_pasid_alloc_table(struct device *dev);
void intel_pasid_free_table(struct device *dev);

View File

@ -924,6 +924,9 @@ void iommu_group_remove_device(struct device *dev)
struct iommu_group *group = dev->iommu_group;
struct group_device *tmp_device, *device = NULL;
if (!group)
return;
dev_info(dev, "Removing from iommu group %d\n", group->id);
/* Pre-notify listeners that a device is being removed. */

View File

@ -91,16 +91,13 @@ static void tpci200_unregister(struct tpci200_board *tpci200)
free_irq(tpci200->info->pdev->irq, (void *) tpci200);
pci_iounmap(tpci200->info->pdev, tpci200->info->interface_regs);
pci_iounmap(tpci200->info->pdev, tpci200->info->cfg_regs);
pci_release_region(tpci200->info->pdev, TPCI200_IP_INTERFACE_BAR);
pci_release_region(tpci200->info->pdev, TPCI200_IO_ID_INT_SPACES_BAR);
pci_release_region(tpci200->info->pdev, TPCI200_MEM16_SPACE_BAR);
pci_release_region(tpci200->info->pdev, TPCI200_MEM8_SPACE_BAR);
pci_release_region(tpci200->info->pdev, TPCI200_CFG_MEM_BAR);
pci_disable_device(tpci200->info->pdev);
pci_dev_put(tpci200->info->pdev);
}
static void tpci200_enable_irq(struct tpci200_board *tpci200,
@ -259,7 +256,7 @@ static int tpci200_register(struct tpci200_board *tpci200)
"(bn 0x%X, sn 0x%X) failed to allocate PCI resource for BAR 2 !",
tpci200->info->pdev->bus->number,
tpci200->info->pdev->devfn);
goto out_disable_pci;
goto err_disable_device;
}
/* Request IO ID INT space (Bar 3) */
@ -271,7 +268,7 @@ static int tpci200_register(struct tpci200_board *tpci200)
"(bn 0x%X, sn 0x%X) failed to allocate PCI resource for BAR 3 !",
tpci200->info->pdev->bus->number,
tpci200->info->pdev->devfn);
goto out_release_ip_space;
goto err_ip_interface_bar;
}
/* Request MEM8 space (Bar 5) */
@ -282,7 +279,7 @@ static int tpci200_register(struct tpci200_board *tpci200)
"(bn 0x%X, sn 0x%X) failed to allocate PCI resource for BAR 5!",
tpci200->info->pdev->bus->number,
tpci200->info->pdev->devfn);
goto out_release_ioid_int_space;
goto err_io_id_int_spaces_bar;
}
/* Request MEM16 space (Bar 4) */
@ -293,7 +290,7 @@ static int tpci200_register(struct tpci200_board *tpci200)
"(bn 0x%X, sn 0x%X) failed to allocate PCI resource for BAR 4!",
tpci200->info->pdev->bus->number,
tpci200->info->pdev->devfn);
goto out_release_mem8_space;
goto err_mem8_space_bar;
}
/* Map internal tpci200 driver user space */
@ -307,7 +304,7 @@ static int tpci200_register(struct tpci200_board *tpci200)
tpci200->info->pdev->bus->number,
tpci200->info->pdev->devfn);
res = -ENOMEM;
goto out_release_mem8_space;
goto err_mem16_space_bar;
}
/* Initialize lock that protects interface_regs */
@ -346,18 +343,22 @@ static int tpci200_register(struct tpci200_board *tpci200)
"(bn 0x%X, sn 0x%X) unable to register IRQ !",
tpci200->info->pdev->bus->number,
tpci200->info->pdev->devfn);
goto out_release_ioid_int_space;
goto err_interface_regs;
}
return 0;
out_release_mem8_space:
err_interface_regs:
pci_iounmap(tpci200->info->pdev, tpci200->info->interface_regs);
err_mem16_space_bar:
pci_release_region(tpci200->info->pdev, TPCI200_MEM16_SPACE_BAR);
err_mem8_space_bar:
pci_release_region(tpci200->info->pdev, TPCI200_MEM8_SPACE_BAR);
out_release_ioid_int_space:
err_io_id_int_spaces_bar:
pci_release_region(tpci200->info->pdev, TPCI200_IO_ID_INT_SPACES_BAR);
out_release_ip_space:
err_ip_interface_bar:
pci_release_region(tpci200->info->pdev, TPCI200_IP_INTERFACE_BAR);
out_disable_pci:
err_disable_device:
pci_disable_device(tpci200->info->pdev);
return res;
}
@ -529,7 +530,7 @@ static int tpci200_pci_probe(struct pci_dev *pdev,
tpci200->info = kzalloc(sizeof(struct tpci200_infos), GFP_KERNEL);
if (!tpci200->info) {
ret = -ENOMEM;
goto out_err_info;
goto err_tpci200;
}
pci_dev_get(pdev);
@ -540,7 +541,7 @@ static int tpci200_pci_probe(struct pci_dev *pdev,
if (ret) {
dev_err(&pdev->dev, "Failed to allocate PCI Configuration Memory");
ret = -EBUSY;
goto out_err_pci_request;
goto err_tpci200_info;
}
tpci200->info->cfg_regs = ioremap(
pci_resource_start(pdev, TPCI200_CFG_MEM_BAR),
@ -548,7 +549,7 @@ static int tpci200_pci_probe(struct pci_dev *pdev,
if (!tpci200->info->cfg_regs) {
dev_err(&pdev->dev, "Failed to map PCI Configuration Memory");
ret = -EFAULT;
goto out_err_ioremap;
goto err_request_region;
}
/* Disable byte swapping for 16 bit IP module access. This will ensure
@ -571,7 +572,7 @@ static int tpci200_pci_probe(struct pci_dev *pdev,
if (ret) {
dev_err(&pdev->dev, "error during tpci200 install\n");
ret = -ENODEV;
goto out_err_install;
goto err_cfg_regs;
}
/* Register the carrier in the industry pack bus driver */
@ -583,7 +584,7 @@ static int tpci200_pci_probe(struct pci_dev *pdev,
dev_err(&pdev->dev,
"error registering the carrier on ipack driver\n");
ret = -EFAULT;
goto out_err_bus_register;
goto err_tpci200_install;
}
/* save the bus number given by ipack to logging purpose */
@ -594,19 +595,16 @@ static int tpci200_pci_probe(struct pci_dev *pdev,
tpci200_create_device(tpci200, i);
return 0;
out_err_bus_register:
err_tpci200_install:
tpci200_uninstall(tpci200);
/* tpci200->info->cfg_regs is unmapped in tpci200_uninstall */
tpci200->info->cfg_regs = NULL;
out_err_install:
if (tpci200->info->cfg_regs)
iounmap(tpci200->info->cfg_regs);
out_err_ioremap:
err_cfg_regs:
pci_iounmap(tpci200->info->pdev, tpci200->info->cfg_regs);
err_request_region:
pci_release_region(pdev, TPCI200_CFG_MEM_BAR);
out_err_pci_request:
pci_dev_put(pdev);
err_tpci200_info:
kfree(tpci200->info);
out_err_info:
pci_dev_put(pdev);
err_tpci200:
kfree(tpci200);
return ret;
}
@ -616,6 +614,12 @@ static void __tpci200_pci_remove(struct tpci200_board *tpci200)
ipack_bus_unregister(tpci200->info->ipack_bus);
tpci200_uninstall(tpci200);
pci_iounmap(tpci200->info->pdev, tpci200->info->cfg_regs);
pci_release_region(tpci200->info->pdev, TPCI200_CFG_MEM_BAR);
pci_dev_put(tpci200->info->pdev);
kfree(tpci200->info);
kfree(tpci200);
}

View File

@ -2018,8 +2018,8 @@ static void dw_mci_tasklet_func(struct tasklet_struct *t)
continue;
}
dw_mci_stop_dma(host);
send_stop_abort(host, data);
dw_mci_stop_dma(host);
state = STATE_SENDING_STOP;
break;
}
@ -2043,10 +2043,10 @@ static void dw_mci_tasklet_func(struct tasklet_struct *t)
*/
if (test_and_clear_bit(EVENT_DATA_ERROR,
&host->pending_events)) {
dw_mci_stop_dma(host);
if (!(host->data_status & (SDMMC_INT_DRTO |
SDMMC_INT_EBE)))
send_stop_abort(host, data);
dw_mci_stop_dma(host);
state = STATE_DATA_ERROR;
break;
}
@ -2079,10 +2079,10 @@ static void dw_mci_tasklet_func(struct tasklet_struct *t)
*/
if (test_and_clear_bit(EVENT_DATA_ERROR,
&host->pending_events)) {
dw_mci_stop_dma(host);
if (!(host->data_status & (SDMMC_INT_DRTO |
SDMMC_INT_EBE)))
send_stop_abort(host, data);
dw_mci_stop_dma(host);
state = STATE_DATA_ERROR;
break;
}

View File

@ -479,8 +479,9 @@ static int sdmmc_post_sig_volt_switch(struct mmci_host *host,
u32 status;
int ret = 0;
if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_180) {
spin_lock_irqsave(&host->lock, flags);
spin_lock_irqsave(&host->lock, flags);
if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_180 &&
host->pwr_reg & MCI_STM32_VSWITCHEN) {
mmci_write_pwrreg(host, host->pwr_reg | MCI_STM32_VSWITCH);
spin_unlock_irqrestore(&host->lock, flags);
@ -492,9 +493,11 @@ static int sdmmc_post_sig_volt_switch(struct mmci_host *host,
writel_relaxed(MCI_STM32_VSWENDC | MCI_STM32_CKSTOPC,
host->base + MMCICLEAR);
spin_lock_irqsave(&host->lock, flags);
mmci_write_pwrreg(host, host->pwr_reg &
~(MCI_STM32_VSWITCHEN | MCI_STM32_VSWITCH));
}
spin_unlock_irqrestore(&host->lock, flags);
return ret;
}

View File

@ -173,6 +173,23 @@ static unsigned int sdhci_iproc_get_max_clock(struct sdhci_host *host)
return pltfm_host->clock;
}
/*
* There is a known bug in BCM2711's SDHCI core integration where the
* controller will hang when the difference between the core clock and
* the bus clock is too great. Specifically, this can be reproduced
* under the following conditions:
*
* - No SD card plugged in, polling thread is running, probing cards at
* 100 kHz.
* - BCM2711's core clock configured at 500 MHz or more.
*
* So we set 200 kHz as the minimum clock frequency available for that SoC.
*/
static unsigned int sdhci_iproc_bcm2711_get_min_clock(struct sdhci_host *host)
{
return 200000;
}
static const struct sdhci_ops sdhci_iproc_ops = {
.set_clock = sdhci_set_clock,
.get_max_clock = sdhci_iproc_get_max_clock,
@ -271,13 +288,15 @@ static const struct sdhci_ops sdhci_iproc_bcm2711_ops = {
.set_clock = sdhci_set_clock,
.set_power = sdhci_set_power_and_bus_voltage,
.get_max_clock = sdhci_iproc_get_max_clock,
.get_min_clock = sdhci_iproc_bcm2711_get_min_clock,
.set_bus_width = sdhci_set_bus_width,
.reset = sdhci_reset,
.set_uhs_signaling = sdhci_set_uhs_signaling,
};
static const struct sdhci_pltfm_data sdhci_bcm2711_pltfm_data = {
.quirks = SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12,
.quirks = SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12 |
SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
.ops = &sdhci_iproc_bcm2711_ops,
};
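For context on how the new hook takes effect, a paraphrase from memory of the sdhci core (not part of this patch): when a platform supplies .get_min_clock, the core uses it for mmc->f_min instead of deriving the minimum from the maximum divider, so the card-polling thread never requests the 100 kHz probe clock that trips the erratum.

/* sdhci_setup_host(), paraphrased; illustration only */
if (host->ops->get_min_clock)
	mmc->f_min = host->ops->get_min_clock(host);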

View File

@ -2089,6 +2089,23 @@ static void sdhci_msm_cqe_disable(struct mmc_host *mmc, bool recovery)
sdhci_cqe_disable(mmc, recovery);
}
static void sdhci_msm_set_timeout(struct sdhci_host *host, struct mmc_command *cmd)
{
u32 count, start = 15;
__sdhci_set_timeout(host, cmd);
count = sdhci_readb(host, SDHCI_TIMEOUT_CONTROL);
/*
* Update the software timeout value if it is less than the hardware data
* timeout value. On Qcom SoCs the hardware data timeout is calculated
* as 4 * MCLK * 2^(count + 13), where MCLK = 1 / host->clock.
*/
if (cmd && cmd->data && host->clock > 400000 &&
host->clock <= 50000000 &&
((1 << (count + start)) > (10 * host->clock)))
host->data_timeout = 22LL * NSEC_PER_SEC;
}
static const struct cqhci_host_ops sdhci_msm_cqhci_ops = {
.enable = sdhci_msm_cqe_enable,
.disable = sdhci_msm_cqe_disable,
@ -2438,6 +2455,7 @@ static const struct sdhci_ops sdhci_msm_ops = {
.irq = sdhci_msm_cqe_irq,
.dump_vendor_regs = sdhci_msm_dump_vendor_regs,
.set_power = sdhci_set_power_noreg,
.set_timeout = sdhci_msm_set_timeout,
};
static const struct sdhci_pltfm_data sdhci_msm_pdata = {
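To spell out the threshold in sdhci_msm_set_timeout() above: the hardware data timeout is 4 * 2^(count + 13) MCLK cycles, i.e. 2^(count + 15) / host->clock seconds, so with start = 15 the test (1 << (count + start)) > (10 * host->clock) reads "hardware timeout exceeds 10 seconds", and the software timeout is then padded to 22 seconds so it cannot fire before the hardware one. A hypothetical helper computing the hardware value, not part of the patch:

static u64 msm_hw_data_timeout_ns(u32 count, u32 clock_hz)
{
	/* 4 * 2^(count + 13) == 2^(count + 15) clock cycles */
	return div_u64((u64)NSEC_PER_SEC << (count + 15), clock_hz);
}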

View File

@ -119,7 +119,7 @@ static int cfi_use_status_reg(struct cfi_private *cfi)
struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
u8 poll_mask = CFI_POLL_STATUS_REG | CFI_POLL_DQ;
return extp->MinorVersion >= '5' &&
return extp && extp->MinorVersion >= '5' &&
(extp->SoftwareFeatures & poll_mask) == CFI_POLL_STATUS_REG;
}

View File

@ -5056,12 +5056,18 @@ static bool of_get_nand_on_flash_bbt(struct device_node *np)
static int of_get_nand_secure_regions(struct nand_chip *chip)
{
struct device_node *dn = nand_get_flash_node(chip);
struct property *prop;
int nr_elem, i, j;
nr_elem = of_property_count_elems_of_size(dn, "secure-regions", sizeof(u64));
if (!nr_elem)
/* Only proceed if the "secure-regions" property is present in DT */
prop = of_find_property(dn, "secure-regions", NULL);
if (!prop)
return 0;
nr_elem = of_property_count_elems_of_size(dn, "secure-regions", sizeof(u64));
if (nr_elem <= 0)
return nr_elem;
chip->nr_secure_regions = nr_elem / 2;
chip->secure_regions = kcalloc(chip->nr_secure_regions, sizeof(*chip->secure_regions),
GFP_KERNEL);

View File

@ -69,7 +69,8 @@
#include "bnxt_debugfs.h"
#define BNXT_TX_TIMEOUT (5 * HZ)
#define BNXT_DEF_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_HW)
#define BNXT_DEF_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_HW | \
NETIF_MSG_TX_ERR)
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Broadcom BCM573xx network driver");
@ -362,6 +363,33 @@ static u16 bnxt_xmit_get_cfa_action(struct sk_buff *skb)
return md_dst->u.port_info.port_id;
}
static void bnxt_txr_db_kick(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
u16 prod)
{
bnxt_db_write(bp, &txr->tx_db, prod);
txr->kick_pending = 0;
}
static bool bnxt_txr_netif_try_stop_queue(struct bnxt *bp,
struct bnxt_tx_ring_info *txr,
struct netdev_queue *txq)
{
netif_tx_stop_queue(txq);
/* netif_tx_stop_queue() must be done before checking
* tx index in bnxt_tx_avail() below, because in
* bnxt_tx_int(), we update tx index before checking for
* netif_tx_queue_stopped().
*/
smp_mb();
if (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh) {
netif_tx_wake_queue(txq);
return false;
}
return true;
}
static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct bnxt *bp = netdev_priv(dev);
@ -381,6 +409,7 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
i = skb_get_queue_mapping(skb);
if (unlikely(i >= bp->tx_nr_rings)) {
dev_kfree_skb_any(skb);
atomic_long_inc(&dev->tx_dropped);
return NETDEV_TX_OK;
}
@ -390,8 +419,12 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
free_size = bnxt_tx_avail(bp, txr);
if (unlikely(free_size < skb_shinfo(skb)->nr_frags + 2)) {
netif_tx_stop_queue(txq);
return NETDEV_TX_BUSY;
/* We must have raced with NAPI cleanup */
if (net_ratelimit() && txr->kick_pending)
netif_warn(bp, tx_err, dev,
"bnxt: ring busy w/ flush pending!\n");
if (bnxt_txr_netif_try_stop_queue(bp, txr, txq))
return NETDEV_TX_BUSY;
}
length = skb->len;
@ -498,21 +531,16 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
normal_tx:
if (length < BNXT_MIN_PKT_SIZE) {
pad = BNXT_MIN_PKT_SIZE - length;
if (skb_pad(skb, pad)) {
if (skb_pad(skb, pad))
/* SKB already freed. */
tx_buf->skb = NULL;
return NETDEV_TX_OK;
}
goto tx_kick_pending;
length = BNXT_MIN_PKT_SIZE;
}
mapping = dma_map_single(&pdev->dev, skb->data, len, DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(&pdev->dev, mapping))) {
dev_kfree_skb_any(skb);
tx_buf->skb = NULL;
return NETDEV_TX_OK;
}
if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
goto tx_free;
dma_unmap_addr_set(tx_buf, mapping, mapping);
flags = (len << TX_BD_LEN_SHIFT) | TX_BD_TYPE_LONG_TX_BD |
@ -597,24 +625,17 @@ normal_tx:
txr->tx_prod = prod;
if (!netdev_xmit_more() || netif_xmit_stopped(txq))
bnxt_db_write(bp, &txr->tx_db, prod);
bnxt_txr_db_kick(bp, txr, prod);
else
txr->kick_pending = 1;
tx_done:
if (unlikely(bnxt_tx_avail(bp, txr) <= MAX_SKB_FRAGS + 1)) {
if (netdev_xmit_more() && !tx_buf->is_push)
bnxt_db_write(bp, &txr->tx_db, prod);
bnxt_txr_db_kick(bp, txr, prod);
netif_tx_stop_queue(txq);
/* netif_tx_stop_queue() must be done before checking
* tx index in bnxt_tx_avail() below, because in
* bnxt_tx_int(), we update tx index before checking for
* netif_tx_queue_stopped().
*/
smp_mb();
if (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh)
netif_tx_wake_queue(txq);
bnxt_txr_netif_try_stop_queue(bp, txr, txq);
}
return NETDEV_TX_OK;
@ -624,7 +645,6 @@ tx_dma_error:
/* start back at beginning and unmap skb */
prod = txr->tx_prod;
tx_buf = &txr->tx_buf_ring[prod];
tx_buf->skb = NULL;
dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
skb_headlen(skb), PCI_DMA_TODEVICE);
prod = NEXT_TX(prod);
@ -638,7 +658,13 @@ tx_dma_error:
PCI_DMA_TODEVICE);
}
tx_free:
dev_kfree_skb_any(skb);
tx_kick_pending:
if (txr->kick_pending)
bnxt_txr_db_kick(bp, txr, txr->tx_prod);
txr->tx_buf_ring[txr->tx_prod].skb = NULL;
atomic_long_inc(&dev->tx_dropped);
return NETDEV_TX_OK;
}
@ -698,14 +724,9 @@ next_tx_int:
smp_mb();
if (unlikely(netif_tx_queue_stopped(txq)) &&
(bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
__netif_tx_lock(txq, smp_processor_id());
if (netif_tx_queue_stopped(txq) &&
bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh &&
txr->dev_state != BNXT_DEV_STATE_CLOSING)
netif_tx_wake_queue(txq);
__netif_tx_unlock(txq);
}
bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh &&
READ_ONCE(txr->dev_state) != BNXT_DEV_STATE_CLOSING)
netif_tx_wake_queue(txq);
}
static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
@ -1733,6 +1754,10 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
return -EBUSY;
/* The validity test of the entry must be done before
* reading any further.
*/
dma_rmb();
prod = rxr->rx_prod;
if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP) {
@ -1936,6 +1961,10 @@ static int bnxt_force_rx_discard(struct bnxt *bp,
if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
return -EBUSY;
/* The validity test of the entry must be done before
* reading any further.
*/
dma_rmb();
cmp_type = RX_CMP_TYPE(rxcmp);
if (cmp_type == CMP_TYPE_RX_L2_CMP) {
rxcmp1->rx_cmp_cfa_code_errors_v2 |=
@ -2400,6 +2429,10 @@ static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget)
if (!TX_CMP_VALID(txcmp, raw_cons))
break;
/* The validity test of the entry must be done before
* reading any further.
*/
dma_rmb();
if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
tmp_raw_cons = NEXT_RAW_CMP(raw_cons);
cp_cons = RING_CMP(tmp_raw_cons);
@ -9018,10 +9051,9 @@ static void bnxt_disable_napi(struct bnxt *bp)
for (i = 0; i < bp->cp_nr_rings; i++) {
struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
napi_disable(&bp->bnapi[i]->napi);
if (bp->bnapi[i]->rx_ring)
cancel_work_sync(&cpr->dim.work);
napi_disable(&bp->bnapi[i]->napi);
}
}
@ -9055,9 +9087,11 @@ void bnxt_tx_disable(struct bnxt *bp)
if (bp->tx_ring) {
for (i = 0; i < bp->tx_nr_rings; i++) {
txr = &bp->tx_ring[i];
txr->dev_state = BNXT_DEV_STATE_CLOSING;
WRITE_ONCE(txr->dev_state, BNXT_DEV_STATE_CLOSING);
}
}
/* Make sure napi polls see @dev_state change */
synchronize_net();
/* Drop carrier first to prevent TX timeout */
netif_carrier_off(bp->dev);
/* Stop all TX queues */
@ -9071,8 +9105,10 @@ void bnxt_tx_enable(struct bnxt *bp)
for (i = 0; i < bp->tx_nr_rings; i++) {
txr = &bp->tx_ring[i];
txr->dev_state = 0;
WRITE_ONCE(txr->dev_state, 0);
}
/* Make sure napi polls see @dev_state change */
synchronize_net();
netif_tx_wake_all_queues(bp->dev);
if (bp->link_info.link_up)
netif_carrier_on(bp->dev);
@ -10644,6 +10680,9 @@ static bool bnxt_rfs_supported(struct bnxt *bp)
return true;
return false;
}
/* 212 firmware is broken for aRFS */
if (BNXT_FW_MAJ(bp) == 212)
return false;
if (BNXT_PF(bp) && !BNXT_CHIP_TYPE_NITRO_A0(bp))
return true;
if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
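The three dma_rmb() additions above implement one idiom: the device writes a completion entry's valid bit last, so the CPU must order its validity check before it reads any other field of the entry. A distilled sketch with a hypothetical descriptor layout (the real bnxt completion structures differ):

#define CMPL_VALID	0x1	/* hypothetical valid bit */

struct cmpl {
	__le32 flags;		/* written last by the device */
	__le32 payload;
};

static bool read_cmpl(const struct cmpl *c, u32 *payload)
{
	if (!(le32_to_cpu(READ_ONCE(c->flags)) & CMPL_VALID))
		return false;
	dma_rmb();	/* valid-bit check ordered before payload read */
	*payload = le32_to_cpu(c->payload);
	return true;
}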

View File

@ -783,6 +783,7 @@ struct bnxt_tx_ring_info {
u16 tx_prod;
u16 tx_cons;
u16 txq_index;
u8 kick_pending;
struct bnxt_db_info tx_db;
struct tx_bd *tx_desc_ring[MAX_TX_PAGES];

View File

@ -3038,17 +3038,6 @@ static int dpaa2_switch_port_init(struct ethsw_port_priv *port_priv, u16 port)
return err;
}
static void dpaa2_switch_takedown(struct fsl_mc_device *sw_dev)
{
struct device *dev = &sw_dev->dev;
struct ethsw_core *ethsw = dev_get_drvdata(dev);
int err;
err = dpsw_close(ethsw->mc_io, 0, ethsw->dpsw_handle);
if (err)
dev_warn(dev, "dpsw_close err %d\n", err);
}
static void dpaa2_switch_ctrl_if_teardown(struct ethsw_core *ethsw)
{
dpsw_ctrl_if_disable(ethsw->mc_io, 0, ethsw->dpsw_handle);
@ -3058,6 +3047,21 @@ static void dpaa2_switch_ctrl_if_teardown(struct ethsw_core *ethsw)
dpaa2_switch_free_dpbp(ethsw);
}
static void dpaa2_switch_teardown(struct fsl_mc_device *sw_dev)
{
struct device *dev = &sw_dev->dev;
struct ethsw_core *ethsw = dev_get_drvdata(dev);
int err;
dpaa2_switch_ctrl_if_teardown(ethsw);
destroy_workqueue(ethsw->workqueue);
err = dpsw_close(ethsw->mc_io, 0, ethsw->dpsw_handle);
if (err)
dev_warn(dev, "dpsw_close err %d\n", err);
}
static int dpaa2_switch_remove(struct fsl_mc_device *sw_dev)
{
struct ethsw_port_priv *port_priv;
@ -3068,8 +3072,6 @@ static int dpaa2_switch_remove(struct fsl_mc_device *sw_dev)
dev = &sw_dev->dev;
ethsw = dev_get_drvdata(dev);
dpaa2_switch_ctrl_if_teardown(ethsw);
dpaa2_switch_teardown_irqs(sw_dev);
dpsw_disable(ethsw->mc_io, 0, ethsw->dpsw_handle);
@ -3084,9 +3086,7 @@ static int dpaa2_switch_remove(struct fsl_mc_device *sw_dev)
kfree(ethsw->acls);
kfree(ethsw->ports);
dpaa2_switch_takedown(sw_dev);
destroy_workqueue(ethsw->workqueue);
dpaa2_switch_teardown(sw_dev);
fsl_mc_portal_free(ethsw->mc_io);
@ -3199,7 +3199,7 @@ static int dpaa2_switch_probe(struct fsl_mc_device *sw_dev)
GFP_KERNEL);
if (!(ethsw->ports)) {
err = -ENOMEM;
goto err_takedown;
goto err_teardown;
}
ethsw->fdbs = kcalloc(ethsw->sw_attr.num_ifs, sizeof(*ethsw->fdbs),
@ -3270,8 +3270,8 @@ err_free_fdbs:
err_free_ports:
kfree(ethsw->ports);
err_takedown:
dpaa2_switch_takedown(sw_dev);
err_teardown:
dpaa2_switch_teardown(sw_dev);
err_free_cmdport:
fsl_mc_portal_free(ethsw->mc_io);

View File

@ -3665,8 +3665,7 @@ u16 i40e_lan_select_queue(struct net_device *netdev,
/* is DCB enabled at all? */
if (vsi->tc_config.numtc == 1)
return i40e_swdcb_skb_tx_hash(netdev, skb,
netdev->real_num_tx_queues);
return netdev_pick_tx(netdev, skb, sb_dev);
prio = skb->priority;
hw = &vsi->back->hw;

View File

@ -136,6 +136,7 @@ struct iavf_q_vector {
struct iavf_mac_filter {
struct list_head list;
u8 macaddr[ETH_ALEN];
bool is_new_mac; /* filter is new, wait for PF decision */
bool remove; /* filter needs to be removed */
bool add; /* filter needs to be added */
};

View File

@ -751,6 +751,7 @@ struct iavf_mac_filter *iavf_add_filter(struct iavf_adapter *adapter,
list_add_tail(&f->list, &adapter->mac_filter_list);
f->add = true;
f->is_new_mac = true;
adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER;
} else {
f->remove = false;

View File

@ -540,6 +540,47 @@ void iavf_del_ether_addrs(struct iavf_adapter *adapter)
kfree(veal);
}
/**
* iavf_mac_add_ok
* @adapter: adapter structure
*
* Submit list of filters based on PF response.
**/
static void iavf_mac_add_ok(struct iavf_adapter *adapter)
{
struct iavf_mac_filter *f, *ftmp;
spin_lock_bh(&adapter->mac_vlan_list_lock);
list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
f->is_new_mac = false;
}
spin_unlock_bh(&adapter->mac_vlan_list_lock);
}
/**
* iavf_mac_add_reject
* @adapter: adapter structure
*
* Remove filters from list based on PF response.
**/
static void iavf_mac_add_reject(struct iavf_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
struct iavf_mac_filter *f, *ftmp;
spin_lock_bh(&adapter->mac_vlan_list_lock);
list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
if (f->remove && ether_addr_equal(f->macaddr, netdev->dev_addr))
f->remove = false;
if (f->is_new_mac) {
list_del(&f->list);
kfree(f);
}
}
spin_unlock_bh(&adapter->mac_vlan_list_lock);
}
/**
* iavf_add_vlans
* @adapter: adapter structure
@ -1492,6 +1533,7 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
case VIRTCHNL_OP_ADD_ETH_ADDR:
dev_err(&adapter->pdev->dev, "Failed to add MAC filter, error %s\n",
iavf_stat_str(&adapter->hw, v_retval));
iavf_mac_add_reject(adapter);
/* restore administratively set MAC address */
ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr);
break;
@ -1639,10 +1681,11 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
}
}
switch (v_opcode) {
case VIRTCHNL_OP_ADD_ETH_ADDR: {
case VIRTCHNL_OP_ADD_ETH_ADDR:
if (!v_retval)
iavf_mac_add_ok(adapter);
if (!ether_addr_equal(netdev->dev_addr, adapter->hw.mac.addr))
ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr);
}
break;
case VIRTCHNL_OP_GET_STATS: {
struct iavf_eth_stats *stats =

View File

@ -52,8 +52,11 @@ static int ixgbe_xsk_pool_enable(struct ixgbe_adapter *adapter,
/* Kick start the NAPI context so that receiving will start */
err = ixgbe_xsk_wakeup(adapter->netdev, qid, XDP_WAKEUP_RX);
if (err)
if (err) {
clear_bit(qid, adapter->af_xdp_zc_qps);
xsk_pool_dma_unmap(pool, IXGBE_RX_DMA_ATTR);
return err;
}
}
return 0;

View File

@ -1334,6 +1334,7 @@ void ocelot_apply_bridge_fwd_mask(struct ocelot *ocelot)
struct net_device *bond = ocelot_port->bond;
mask = ocelot_get_bridge_fwd_mask(ocelot, bridge);
mask |= cpu_fwd_mask;
mask &= ~BIT(port);
if (bond) {
mask &= ~ocelot_get_bond_mask(ocelot, bond,

View File

@ -501,6 +501,7 @@ struct qede_fastpath {
#define QEDE_SP_HW_ERR 4
#define QEDE_SP_ARFS_CONFIG 5
#define QEDE_SP_AER 7
#define QEDE_SP_DISABLE 8
#ifdef CONFIG_RFS_ACCEL
int qede_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,

View File

@ -1009,6 +1009,13 @@ static void qede_sp_task(struct work_struct *work)
struct qede_dev *edev = container_of(work, struct qede_dev,
sp_task.work);
/* Disable execution of this deferred work once
* qede removal is in progress; this stops any future
* scheduling of sp_task.
*/
if (test_bit(QEDE_SP_DISABLE, &edev->sp_flags))
return;
/* The locking scheme depends on the specific flag:
* In case of QEDE_SP_RECOVERY, acquiring the RTNL lock is required to
* ensure that ongoing flows are ended and new ones are not started.
@ -1300,6 +1307,7 @@ static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode)
qede_rdma_dev_remove(edev, (mode == QEDE_REMOVE_RECOVERY));
if (mode != QEDE_REMOVE_RECOVERY) {
set_bit(QEDE_SP_DISABLE, &edev->sp_flags);
unregister_netdev(ndev);
cancel_delayed_work_sync(&edev->sp_task);

View File

@ -3156,8 +3156,10 @@ int qlcnic_83xx_flash_read32(struct qlcnic_adapter *adapter, u32 flash_addr,
indirect_addr = QLC_83XX_FLASH_DIRECT_DATA(addr);
ret = QLCRD32(adapter, indirect_addr, &err);
if (err == -EIO)
if (err == -EIO) {
qlcnic_83xx_unlock_flash(adapter);
return err;
}
word = ret;
*(u32 *)p_data = word;

View File

@ -833,6 +833,12 @@ static void decode_data(struct sixpack *sp, unsigned char inbyte)
return;
}
if (sp->rx_count_cooked + 2 >= sizeof(sp->cooked_buf)) {
pr_err("6pack: cooked buffer overrun, data loss\n");
sp->rx_count = 0;
return;
}
buf = sp->raw_buf;
sp->cooked_buf[sp->rx_count_cooked++] =
buf[0] | ((buf[1] << 2) & 0xc0);
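The bound in the new check above follows from the decoder emitting up to three cooked bytes per group of four raw sixbit bytes, so rx_count_cooked, rx_count_cooked + 1 and rx_count_cooked + 2 must all be valid indices into cooked_buf. The inverse of the check as a hypothetical helper, not part of the patch:

static inline bool sixpack_cooked_fits(const struct sixpack *sp)
{
	/* room for all three writes of one decoded group */
	return sp->rx_count_cooked + 2 < sizeof(sp->cooked_buf);
}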

View File

@ -82,6 +82,17 @@ out:
static int parent_count;
static void mdio_mux_uninit_children(struct mdio_mux_parent_bus *pb)
{
struct mdio_mux_child_bus *cb = pb->children;
while (cb) {
mdiobus_unregister(cb->mii_bus);
mdiobus_free(cb->mii_bus);
cb = cb->next;
}
}
int mdio_mux_init(struct device *dev,
struct device_node *mux_node,
int (*switch_fn)(int cur, int desired, void *data),
@ -144,7 +155,7 @@ int mdio_mux_init(struct device *dev,
cb = devm_kzalloc(dev, sizeof(*cb), GFP_KERNEL);
if (!cb) {
ret_val = -ENOMEM;
continue;
goto err_loop;
}
cb->bus_number = v;
cb->parent = pb;
@ -152,8 +163,7 @@ int mdio_mux_init(struct device *dev,
cb->mii_bus = mdiobus_alloc();
if (!cb->mii_bus) {
ret_val = -ENOMEM;
devm_kfree(dev, cb);
continue;
goto err_loop;
}
cb->mii_bus->priv = cb;
@ -165,11 +175,15 @@ int mdio_mux_init(struct device *dev,
cb->mii_bus->write = mdio_mux_write;
r = of_mdiobus_register(cb->mii_bus, child_bus_node);
if (r) {
mdiobus_free(cb->mii_bus);
if (r == -EPROBE_DEFER) {
ret_val = r;
goto err_loop;
}
devm_kfree(dev, cb);
dev_err(dev,
"Error: Failed to register MDIO bus for child %pOF\n",
child_bus_node);
mdiobus_free(cb->mii_bus);
devm_kfree(dev, cb);
} else {
cb->next = pb->children;
pb->children = cb;
@ -182,6 +196,10 @@ int mdio_mux_init(struct device *dev,
dev_err(dev, "Error: No acceptable child buses found\n");
devm_kfree(dev, pb);
err_loop:
mdio_mux_uninit_children(pb);
of_node_put(child_bus_node);
err_pb_kz:
put_device(&parent_bus->dev);
err_parent_bus:
@ -193,14 +211,8 @@ EXPORT_SYMBOL_GPL(mdio_mux_init);
void mdio_mux_uninit(void *mux_handle)
{
struct mdio_mux_parent_bus *pb = mux_handle;
struct mdio_mux_child_bus *cb = pb->children;
while (cb) {
mdiobus_unregister(cb->mii_bus);
mdiobus_free(cb->mii_bus);
cb = cb->next;
}
mdio_mux_uninit_children(pb);
put_device(&pb->mii_bus->dev);
}
EXPORT_SYMBOL_GPL(mdio_mux_uninit);
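
The mdio_mux change does two things: the duplicated child teardown moves into one helper shared by the probe error path and mdio_mux_uninit(), and allocation or registration failures now jump to err_loop instead of silently continuing with a half-built mux. A simplified sketch of the shared-teardown shape (plain malloc/free stands in for the mdiobus unregister/free pair, and unlike the driver the list nodes here own themselves):

#include <stdlib.h>

struct child_bus {
        struct child_bus *next;
};

/* One teardown walk, shared by the error path and the normal exit. */
static void uninit_children(struct child_bus *head)
{
        while (head) {
                struct child_bus *next = head->next;

                free(head);     /* stands in for mdiobus_unregister + free */
                head = next;
        }
}

static int init_children(struct child_bus **head, int n)
{
        int i;

        for (i = 0; i < n; i++) {
                struct child_bus *cb = malloc(sizeof(*cb));

                if (!cb) {
                        uninit_children(*head); /* err_loop: undo everything */
                        *head = NULL;
                        return -1;
                }
                cb->next = *head;
                *head = cb;
        }
        return 0;
}

int main(void)
{
        struct child_bus *head = NULL;

        if (init_children(&head, 4) == 0)
                uninit_children(head);
        return 0;
}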


@ -205,8 +205,7 @@ struct sk_buff *asix_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
int asix_set_sw_mii(struct usbnet *dev, int in_pm);
int asix_set_hw_mii(struct usbnet *dev, int in_pm);
int asix_read_phy_addr(struct usbnet *dev, int internal);
int asix_get_phy_addr(struct usbnet *dev);
int asix_read_phy_addr(struct usbnet *dev, bool internal);
int asix_sw_reset(struct usbnet *dev, u8 flags, int in_pm);


@ -288,33 +288,34 @@ int asix_set_hw_mii(struct usbnet *dev, int in_pm)
return ret;
}
int asix_read_phy_addr(struct usbnet *dev, int internal)
int asix_read_phy_addr(struct usbnet *dev, bool internal)
{
int offset = (internal ? 1 : 0);
int ret, offset;
u8 buf[2];
int ret = asix_read_cmd(dev, AX_CMD_READ_PHY_ID, 0, 0, 2, buf, 0);
netdev_dbg(dev->net, "asix_get_phy_addr()\n");
ret = asix_read_cmd(dev, AX_CMD_READ_PHY_ID, 0, 0, 2, buf, 0);
if (ret < 0)
goto error;
if (ret < 2) {
netdev_err(dev->net, "Error reading PHYID register: %02x\n", ret);
goto out;
ret = -EIO;
goto error;
}
netdev_dbg(dev->net, "asix_get_phy_addr() returning 0x%04x\n",
*((__le16 *)buf));
offset = (internal ? 1 : 0);
ret = buf[offset];
out:
netdev_dbg(dev->net, "%s PHY address 0x%x\n",
internal ? "internal" : "external", ret);
return ret;
error:
netdev_err(dev->net, "Error reading PHY_ID register: %02x\n", ret);
return ret;
}
int asix_get_phy_addr(struct usbnet *dev)
{
/* return the address of the internal phy */
return asix_read_phy_addr(dev, 1);
}
int asix_sw_reset(struct usbnet *dev, u8 flags, int in_pm)
{
int ret;
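
asix_read_phy_addr() now returns either a PHY address (>= 0) or a negative errno in the same int, and the separate asix_get_phy_addr() wrapper is dropped; the ax88172/ax88772/ax88178 bind hunks below pick up the new contract. The idiom in miniature (stubbed device access, made-up values):

#include <stdio.h>

#define EIO 5

/* Return a PHY address (>= 0) or a negative errno, never both. */
static int read_phy_addr(int simulate_error)
{
        if (simulate_error)
                return -EIO;
        return 0x10;                    /* pretend the device answered */
}

int main(void)
{
        int phy_id = read_phy_addr(1);

        if (phy_id < 0) {               /* every caller must check first */
                fprintf(stderr, "PHY read failed: %d\n", phy_id);
                return 1;
        }
        printf("PHY address 0x%x\n", phy_id);
        return 0;
}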


@ -262,7 +262,10 @@ static int ax88172_bind(struct usbnet *dev, struct usb_interface *intf)
dev->mii.mdio_write = asix_mdio_write;
dev->mii.phy_id_mask = 0x3f;
dev->mii.reg_num_mask = 0x1f;
dev->mii.phy_id = asix_get_phy_addr(dev);
dev->mii.phy_id = asix_read_phy_addr(dev, true);
if (dev->mii.phy_id < 0)
return dev->mii.phy_id;
dev->net->netdev_ops = &ax88172_netdev_ops;
dev->net->ethtool_ops = &ax88172_ethtool_ops;
@ -717,7 +720,10 @@ static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf)
dev->mii.mdio_write = asix_mdio_write;
dev->mii.phy_id_mask = 0x1f;
dev->mii.reg_num_mask = 0x1f;
dev->mii.phy_id = asix_get_phy_addr(dev);
dev->mii.phy_id = asix_read_phy_addr(dev, true);
if (dev->mii.phy_id < 0)
return dev->mii.phy_id;
dev->net->netdev_ops = &ax88772_netdev_ops;
dev->net->ethtool_ops = &ax88772_ethtool_ops;
@ -1081,7 +1087,10 @@ static int ax88178_bind(struct usbnet *dev, struct usb_interface *intf)
dev->mii.phy_id_mask = 0x1f;
dev->mii.reg_num_mask = 0xff;
dev->mii.supports_gmii = 1;
dev->mii.phy_id = asix_get_phy_addr(dev);
dev->mii.phy_id = asix_read_phy_addr(dev, true);
if (dev->mii.phy_id < 0)
return dev->mii.phy_id;
dev->net->netdev_ops = &ax88178_netdev_ops;
dev->net->ethtool_ops = &ax88178_ethtool_ops;


@ -220,6 +220,11 @@ static int ax88172a_bind(struct usbnet *dev, struct usb_interface *intf)
}
priv->phy_addr = asix_read_phy_addr(dev, priv->use_embdphy);
if (priv->phy_addr < 0) {
ret = priv->phy_addr;
goto free;
}
ax88172a_reset_phy(dev, priv->use_embdphy);
/* Asix framing packs multiple eth frames into a 2K usb bulk transfer */


@ -1154,7 +1154,7 @@ static int lan78xx_link_reset(struct lan78xx_net *dev)
{
struct phy_device *phydev = dev->net->phydev;
struct ethtool_link_ksettings ecmd;
int ladv, radv, ret;
int ladv, radv, ret, link;
u32 buf;
/* clear LAN78xx interrupt status */
@ -1162,9 +1162,12 @@ static int lan78xx_link_reset(struct lan78xx_net *dev)
if (unlikely(ret < 0))
return -EIO;
mutex_lock(&phydev->lock);
phy_read_status(phydev);
link = phydev->link;
mutex_unlock(&phydev->lock);
if (!phydev->link && dev->link_on) {
if (!link && dev->link_on) {
dev->link_on = false;
/* reset MAC */
@ -1177,7 +1180,7 @@ static int lan78xx_link_reset(struct lan78xx_net *dev)
return -EIO;
del_timer(&dev->stat_monitor);
} else if (phydev->link && !dev->link_on) {
} else if (link && !dev->link_on) {
dev->link_on = true;
phy_ethtool_ksettings_get(phydev, &ecmd);
@ -1466,9 +1469,14 @@ static int lan78xx_set_eee(struct net_device *net, struct ethtool_eee *edata)
static u32 lan78xx_get_link(struct net_device *net)
{
phy_read_status(net->phydev);
u32 link;
return net->phydev->link;
mutex_lock(&net->phydev->lock);
phy_read_status(net->phydev);
link = net->phydev->link;
mutex_unlock(&net->phydev->lock);
return link;
}
static void lan78xx_get_drvinfo(struct net_device *net,
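
Both lan78xx hunks apply one rule: refresh and read phydev->link while holding phydev->lock, copy it to a local, and use only the snapshot after unlocking. A pthread re-creation of that snapshot-under-lock idiom (the struct and field names are stand-ins):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct phy {
        pthread_mutex_t lock;
        bool link;
};

/* Sample shared state under the lock, then act on the local copy. */
static bool link_snapshot(struct phy *p)
{
        bool link;

        pthread_mutex_lock(&p->lock);
        /* in the driver, phy_read_status() refreshes p->link here */
        link = p->link;
        pthread_mutex_unlock(&p->lock);

        return link;
}

int main(void)
{
        struct phy p = { PTHREAD_MUTEX_INITIALIZER, true };

        printf("link=%d\n", link_snapshot(&p));
        return 0;
}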


@ -132,9 +132,15 @@ static int get_registers(pegasus_t *pegasus, __u16 indx, __u16 size, void *data)
static int set_registers(pegasus_t *pegasus, __u16 indx, __u16 size,
const void *data)
{
return usb_control_msg_send(pegasus->usb, 0, PEGASUS_REQ_SET_REGS,
int ret;
ret = usb_control_msg_send(pegasus->usb, 0, PEGASUS_REQ_SET_REGS,
PEGASUS_REQT_WRITE, 0, indx, data, size,
1000, GFP_NOIO);
if (ret < 0)
netif_dbg(pegasus, drv, pegasus->net, "%s failed with %d\n", __func__, ret);
return ret;
}
/*
@ -145,10 +151,15 @@ static int set_registers(pegasus_t *pegasus, __u16 indx, __u16 size,
static int set_register(pegasus_t *pegasus, __u16 indx, __u8 data)
{
void *buf = &data;
int ret;
return usb_control_msg_send(pegasus->usb, 0, PEGASUS_REQ_SET_REG,
ret = usb_control_msg_send(pegasus->usb, 0, PEGASUS_REQ_SET_REG,
PEGASUS_REQT_WRITE, data, indx, buf, 1,
1000, GFP_NOIO);
if (ret < 0)
netif_dbg(pegasus, drv, pegasus->net, "%s failed with %d\n", __func__, ret);
return ret;
}
static int update_eth_regs_async(pegasus_t *pegasus)
@ -188,10 +199,9 @@ static int update_eth_regs_async(pegasus_t *pegasus)
static int __mii_op(pegasus_t *p, __u8 phy, __u8 indx, __u16 *regd, __u8 cmd)
{
int i;
__u8 data[4] = { phy, 0, 0, indx };
int i, ret;
__le16 regdi;
int ret = -ETIMEDOUT;
__u8 data[4] = { phy, 0, 0, indx };
if (cmd & PHY_WRITE) {
__le16 *t = (__le16 *) & data[1];
@ -207,12 +217,15 @@ static int __mii_op(pegasus_t *p, __u8 phy, __u8 indx, __u16 *regd, __u8 cmd)
if (data[0] & PHY_DONE)
break;
}
if (i >= REG_TIMEOUT)
if (i >= REG_TIMEOUT) {
ret = -ETIMEDOUT;
goto fail;
}
if (cmd & PHY_READ) {
ret = get_registers(p, PhyData, 2, &regdi);
if (ret < 0)
goto fail;
*regd = le16_to_cpu(regdi);
return ret;
}
return 0;
fail:
@ -235,9 +248,13 @@ static int write_mii_word(pegasus_t *pegasus, __u8 phy, __u8 indx, __u16 *regd)
static int mdio_read(struct net_device *dev, int phy_id, int loc)
{
pegasus_t *pegasus = netdev_priv(dev);
int ret;
u16 res;
read_mii_word(pegasus, phy_id, loc, &res);
ret = read_mii_word(pegasus, phy_id, loc, &res);
if (ret < 0)
return ret;
return (int)res;
}
@ -251,10 +268,9 @@ static void mdio_write(struct net_device *dev, int phy_id, int loc, int val)
static int read_eprom_word(pegasus_t *pegasus, __u8 index, __u16 *retdata)
{
int i;
__u8 tmp = 0;
int ret, i;
__le16 retdatai;
int ret;
__u8 tmp = 0;
set_register(pegasus, EpromCtrl, 0);
set_register(pegasus, EpromOffset, index);
@ -262,21 +278,25 @@ static int read_eprom_word(pegasus_t *pegasus, __u8 index, __u16 *retdata)
for (i = 0; i < REG_TIMEOUT; i++) {
ret = get_registers(pegasus, EpromCtrl, 1, &tmp);
if (ret < 0)
goto fail;
if (tmp & EPROM_DONE)
break;
if (ret == -ESHUTDOWN)
goto fail;
}
if (i >= REG_TIMEOUT)
if (i >= REG_TIMEOUT) {
ret = -ETIMEDOUT;
goto fail;
}
ret = get_registers(pegasus, EpromData, 2, &retdatai);
if (ret < 0)
goto fail;
*retdata = le16_to_cpu(retdatai);
return ret;
fail:
netif_warn(pegasus, drv, pegasus->net, "%s failed\n", __func__);
return -ETIMEDOUT;
netif_dbg(pegasus, drv, pegasus->net, "%s failed\n", __func__);
return ret;
}
#ifdef PEGASUS_WRITE_EEPROM
@ -324,10 +344,10 @@ static int write_eprom_word(pegasus_t *pegasus, __u8 index, __u16 data)
return ret;
fail:
netif_warn(pegasus, drv, pegasus->net, "%s failed\n", __func__);
netif_dbg(pegasus, drv, pegasus->net, "%s failed\n", __func__);
return -ETIMEDOUT;
}
#endif /* PEGASUS_WRITE_EEPROM */
#endif /* PEGASUS_WRITE_EEPROM */
static inline int get_node_id(pegasus_t *pegasus, u8 *id)
{
@ -367,19 +387,21 @@ static void set_ethernet_addr(pegasus_t *pegasus)
return;
err:
eth_hw_addr_random(pegasus->net);
dev_info(&pegasus->intf->dev, "software assigned MAC address.\n");
netif_dbg(pegasus, drv, pegasus->net, "software assigned MAC address.\n");
return;
}
static inline int reset_mac(pegasus_t *pegasus)
{
int ret, i;
__u8 data = 0x8;
int i;
set_register(pegasus, EthCtrl1, data);
for (i = 0; i < REG_TIMEOUT; i++) {
get_registers(pegasus, EthCtrl1, 1, &data);
ret = get_registers(pegasus, EthCtrl1, 1, &data);
if (ret < 0)
goto fail;
if (~data & 0x08) {
if (loopback)
break;
@ -402,22 +424,29 @@ static inline int reset_mac(pegasus_t *pegasus)
}
if (usb_dev_id[pegasus->dev_index].vendor == VENDOR_ELCON) {
__u16 auxmode;
read_mii_word(pegasus, 3, 0x1b, &auxmode);
ret = read_mii_word(pegasus, 3, 0x1b, &auxmode);
if (ret < 0)
goto fail;
auxmode |= 4;
write_mii_word(pegasus, 3, 0x1b, &auxmode);
}
return 0;
fail:
netif_dbg(pegasus, drv, pegasus->net, "%s failed\n", __func__);
return ret;
}
static int enable_net_traffic(struct net_device *dev, struct usb_device *usb)
{
__u16 linkpart;
__u8 data[4];
pegasus_t *pegasus = netdev_priv(dev);
int ret;
__u16 linkpart;
__u8 data[4];
read_mii_word(pegasus, pegasus->phy, MII_LPA, &linkpart);
ret = read_mii_word(pegasus, pegasus->phy, MII_LPA, &linkpart);
if (ret < 0)
goto fail;
data[0] = 0xc8; /* TX & RX enable, append status, no CRC */
data[1] = 0;
if (linkpart & (ADVERTISE_100FULL | ADVERTISE_10FULL))
@ -435,11 +464,16 @@ static int enable_net_traffic(struct net_device *dev, struct usb_device *usb)
usb_dev_id[pegasus->dev_index].vendor == VENDOR_LINKSYS2 ||
usb_dev_id[pegasus->dev_index].vendor == VENDOR_DLINK) {
u16 auxmode;
read_mii_word(pegasus, 0, 0x1b, &auxmode);
ret = read_mii_word(pegasus, 0, 0x1b, &auxmode);
if (ret < 0)
goto fail;
auxmode |= 4;
write_mii_word(pegasus, 0, 0x1b, &auxmode);
}
return 0;
fail:
netif_dbg(pegasus, drv, pegasus->net, "%s failed\n", __func__);
return ret;
}
@ -447,9 +481,9 @@ static void read_bulk_callback(struct urb *urb)
{
pegasus_t *pegasus = urb->context;
struct net_device *net;
u8 *buf = urb->transfer_buffer;
int rx_status, count = urb->actual_length;
int status = urb->status;
u8 *buf = urb->transfer_buffer;
__u16 pkt_len;
if (!pegasus)
@ -1004,8 +1038,7 @@ static int pegasus_ioctl(struct net_device *net, struct ifreq *rq, int cmd)
data[0] = pegasus->phy;
fallthrough;
case SIOCDEVPRIVATE + 1:
read_mii_word(pegasus, data[0], data[1] & 0x1f, &data[3]);
res = 0;
res = read_mii_word(pegasus, data[0], data[1] & 0x1f, &data[3]);
break;
case SIOCDEVPRIVATE + 2:
if (!capable(CAP_NET_ADMIN))
@ -1039,22 +1072,25 @@ static void pegasus_set_multicast(struct net_device *net)
static __u8 mii_phy_probe(pegasus_t *pegasus)
{
int i;
int i, ret;
__u16 tmp;
for (i = 0; i < 32; i++) {
read_mii_word(pegasus, i, MII_BMSR, &tmp);
ret = read_mii_word(pegasus, i, MII_BMSR, &tmp);
if (ret < 0)
goto fail;
if (tmp == 0 || tmp == 0xffff || (tmp & BMSR_MEDIA) == 0)
continue;
else
return i;
}
fail:
return 0xff;
}
static inline void setup_pegasus_II(pegasus_t *pegasus)
{
int ret;
__u8 data = 0xa5;
set_register(pegasus, Reg1d, 0);
@ -1066,7 +1102,9 @@ static inline void setup_pegasus_II(pegasus_t *pegasus)
set_register(pegasus, Reg7b, 2);
set_register(pegasus, 0x83, data);
get_registers(pegasus, 0x83, 1, &data);
ret = get_registers(pegasus, 0x83, 1, &data);
if (ret < 0)
goto fail;
if (data == 0xa5)
pegasus->chip = 0x8513;
@ -1081,6 +1119,10 @@ static inline void setup_pegasus_II(pegasus_t *pegasus)
set_register(pegasus, Reg81, 6);
else
set_register(pegasus, Reg81, 2);
return;
fail:
netif_dbg(pegasus, drv, pegasus->net, "%s failed\n", __func__);
}
static void check_carrier(struct work_struct *work)
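
The pegasus series is one theme repeated: register accesses that used to be fired and forgotten now have every get_registers()/read_mii_word() result checked and propagated, and poll loops report -ETIMEDOUT only when they genuinely exhaust their attempts. The poll-loop shape, reduced to a userspace sketch with a stubbed register read:

#include <stdio.h>

#define ETIMEDOUT   110
#define REG_TIMEOUT 16
#define DONE_BIT    0x01

static int get_register(unsigned char *val)
{
        *val = 0;                       /* stub: device never reports DONE */
        return 0;
}

/* Distinguish I/O errors (propagated as-is) from a real timeout. */
static int wait_for_done(void)
{
        unsigned char tmp;
        int i, ret;

        for (i = 0; i < REG_TIMEOUT; i++) {
                ret = get_register(&tmp);
                if (ret < 0)
                        return ret;     /* I/O error, not a timeout */
                if (tmp & DONE_BIT)
                        return 0;
        }
        return -ETIMEDOUT;              /* all attempts exhausted */
}

int main(void)
{
        printf("wait_for_done=%d\n", wait_for_done());
        return 0;
}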


@ -3953,17 +3953,28 @@ static void rtl_clear_bp(struct r8152 *tp, u16 type)
case RTL_VER_06:
ocp_write_byte(tp, type, PLA_BP_EN, 0);
break;
case RTL_VER_14:
ocp_write_word(tp, type, USB_BP2_EN, 0);
ocp_write_word(tp, type, USB_BP_8, 0);
ocp_write_word(tp, type, USB_BP_9, 0);
ocp_write_word(tp, type, USB_BP_10, 0);
ocp_write_word(tp, type, USB_BP_11, 0);
ocp_write_word(tp, type, USB_BP_12, 0);
ocp_write_word(tp, type, USB_BP_13, 0);
ocp_write_word(tp, type, USB_BP_14, 0);
ocp_write_word(tp, type, USB_BP_15, 0);
break;
case RTL_VER_08:
case RTL_VER_09:
case RTL_VER_10:
case RTL_VER_11:
case RTL_VER_12:
case RTL_VER_13:
case RTL_VER_14:
case RTL_VER_15:
default:
if (type == MCU_TYPE_USB) {
ocp_write_byte(tp, MCU_TYPE_USB, USB_BP2_EN, 0);
ocp_write_word(tp, MCU_TYPE_USB, USB_BP2_EN, 0);
ocp_write_word(tp, MCU_TYPE_USB, USB_BP_8, 0);
ocp_write_word(tp, MCU_TYPE_USB, USB_BP_9, 0);
@ -4329,7 +4340,6 @@ static bool rtl8152_is_fw_mac_ok(struct r8152 *tp, struct fw_mac *mac)
case RTL_VER_11:
case RTL_VER_12:
case RTL_VER_13:
case RTL_VER_14:
case RTL_VER_15:
fw_reg = 0xf800;
bp_ba_addr = PLA_BP_BA;
@ -4337,6 +4347,13 @@ static bool rtl8152_is_fw_mac_ok(struct r8152 *tp, struct fw_mac *mac)
bp_start = PLA_BP_0;
max_bp = 8;
break;
case RTL_VER_14:
fw_reg = 0xf800;
bp_ba_addr = PLA_BP_BA;
bp_en_addr = USB_BP2_EN;
bp_start = PLA_BP_0;
max_bp = 16;
break;
default:
goto out;
}


@ -63,7 +63,7 @@ static const unsigned long guest_offloads[] = {
VIRTIO_NET_F_GUEST_CSUM
};
#define GUEST_OFFLOAD_LRO_MASK ((1ULL << VIRTIO_NET_F_GUEST_TSO4) | \
#define GUEST_OFFLOAD_GRO_HW_MASK ((1ULL << VIRTIO_NET_F_GUEST_TSO4) | \
(1ULL << VIRTIO_NET_F_GUEST_TSO6) | \
(1ULL << VIRTIO_NET_F_GUEST_ECN) | \
(1ULL << VIRTIO_NET_F_GUEST_UFO))
@ -2490,7 +2490,7 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_ECN) ||
virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_UFO) ||
virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_CSUM))) {
NL_SET_ERR_MSG_MOD(extack, "Can't set XDP while host is implementing LRO/CSUM, disable LRO/CSUM first");
NL_SET_ERR_MSG_MOD(extack, "Can't set XDP while host is implementing GRO_HW/CSUM, disable GRO_HW/CSUM first");
return -EOPNOTSUPP;
}
@ -2621,15 +2621,15 @@ static int virtnet_set_features(struct net_device *dev,
u64 offloads;
int err;
if ((dev->features ^ features) & NETIF_F_LRO) {
if ((dev->features ^ features) & NETIF_F_GRO_HW) {
if (vi->xdp_enabled)
return -EBUSY;
if (features & NETIF_F_LRO)
if (features & NETIF_F_GRO_HW)
offloads = vi->guest_offloads_capable;
else
offloads = vi->guest_offloads_capable &
~GUEST_OFFLOAD_LRO_MASK;
~GUEST_OFFLOAD_GRO_HW_MASK;
err = virtnet_set_guest_offloads(vi, offloads);
if (err)
@ -3109,9 +3109,9 @@ static int virtnet_probe(struct virtio_device *vdev)
dev->features |= NETIF_F_RXCSUM;
if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) ||
virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6))
dev->features |= NETIF_F_LRO;
dev->features |= NETIF_F_GRO_HW;
if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS))
dev->hw_features |= NETIF_F_LRO;
dev->hw_features |= NETIF_F_GRO_HW;
dev->vlan_features = dev->features;
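
The virtio_net hunks are largely a rename, LRO to the more accurate GRO_HW, but the mask logic deserves spelling out: turning NETIF_F_GRO_HW off strips exactly the guest offloads that imply it from the capable set. A sketch using the virtio feature bit numbers from the spec (the helper name is invented):

#include <stdint.h>
#include <stdio.h>

#define VIRTIO_NET_F_GUEST_TSO4  7
#define VIRTIO_NET_F_GUEST_TSO6  8
#define VIRTIO_NET_F_GUEST_ECN   9
#define VIRTIO_NET_F_GUEST_UFO  10

#define GUEST_OFFLOAD_GRO_HW_MASK ((1ULL << VIRTIO_NET_F_GUEST_TSO4) | \
                                   (1ULL << VIRTIO_NET_F_GUEST_TSO6) | \
                                   (1ULL << VIRTIO_NET_F_GUEST_ECN)  | \
                                   (1ULL << VIRTIO_NET_F_GUEST_UFO))

/* GRO_HW on: offer all capable offloads; off: drop what it implies. */
static uint64_t next_offloads(uint64_t capable, int gro_hw_on)
{
        return gro_hw_on ? capable : capable & ~GUEST_OFFLOAD_GRO_HW_MASK;
}

int main(void)
{
        uint64_t capable = GUEST_OFFLOAD_GRO_HW_MASK | 1ULL;  /* + CSUM bit 0 */

        printf("on:  %#llx\n", (unsigned long long)next_offloads(capable, 1));
        printf("off: %#llx\n", (unsigned long long)next_offloads(capable, 0));
        return 0;
}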


@ -1367,6 +1367,8 @@ static struct sk_buff *vrf_ip6_rcv(struct net_device *vrf_dev,
bool need_strict = rt6_need_strict(&ipv6_hdr(skb)->daddr);
bool is_ndisc = ipv6_ndisc_frame(skb);
nf_reset_ct(skb);
/* loopback, multicast & non-ND link-local traffic; do not push through
* packet taps again. Reset pkt_type for upper layers to process skb.
* For strict packets with a source LLA, determine the dst using the
@ -1429,6 +1431,8 @@ static struct sk_buff *vrf_ip_rcv(struct net_device *vrf_dev,
skb->skb_iif = vrf_dev->ifindex;
IPCB(skb)->flags |= IPSKB_L3SLAVE;
nf_reset_ct(skb);
if (ipv4_is_multicast(ip_hdr(skb)->daddr))
goto out;


@ -111,7 +111,7 @@ mt7915_mcu_get_cipher(int cipher)
case WLAN_CIPHER_SUITE_SMS4:
return MCU_CIPHER_WAPI;
default:
return MT_CIPHER_NONE;
return MCU_CIPHER_NONE;
}
}


@ -1035,7 +1035,8 @@ enum {
};
enum mcu_cipher_type {
MCU_CIPHER_WEP40 = 1,
MCU_CIPHER_NONE = 0,
MCU_CIPHER_WEP40,
MCU_CIPHER_WEP104,
MCU_CIPHER_WEP128,
MCU_CIPHER_TKIP,


@ -111,7 +111,7 @@ mt7921_mcu_get_cipher(int cipher)
case WLAN_CIPHER_SUITE_SMS4:
return MCU_CIPHER_WAPI;
default:
return MT_CIPHER_NONE;
return MCU_CIPHER_NONE;
}
}


@ -198,7 +198,8 @@ struct sta_rec_sec {
} __packed;
enum mcu_cipher_type {
MCU_CIPHER_WEP40 = 1,
MCU_CIPHER_NONE = 0,
MCU_CIPHER_WEP40,
MCU_CIPHER_WEP104,
MCU_CIPHER_WEP128,
MCU_CIPHER_TKIP,


@ -1846,9 +1846,6 @@ void dev_pm_opp_put_supported_hw(struct opp_table *opp_table)
if (unlikely(!opp_table))
return;
/* Make sure there are no concurrent readers while updating opp_table */
WARN_ON(!list_empty(&opp_table->opp_list));
kfree(opp_table->supported_hw);
opp_table->supported_hw = NULL;
opp_table->supported_hw_count = 0;
@ -1934,9 +1931,6 @@ void dev_pm_opp_put_prop_name(struct opp_table *opp_table)
if (unlikely(!opp_table))
return;
/* Make sure there are no concurrent readers while updating opp_table */
WARN_ON(!list_empty(&opp_table->opp_list));
kfree(opp_table->prop_name);
opp_table->prop_name = NULL;
@ -2046,9 +2040,6 @@ void dev_pm_opp_put_regulators(struct opp_table *opp_table)
if (!opp_table->regulators)
goto put_opp_table;
/* Make sure there are no concurrent readers while updating opp_table */
WARN_ON(!list_empty(&opp_table->opp_list));
if (opp_table->enabled) {
for (i = opp_table->regulator_count - 1; i >= 0; i--)
regulator_disable(opp_table->regulators[i]);
@ -2168,9 +2159,6 @@ void dev_pm_opp_put_clkname(struct opp_table *opp_table)
if (unlikely(!opp_table))
return;
/* Make sure there are no concurrent readers while updating opp_table */
WARN_ON(!list_empty(&opp_table->opp_list));
clk_put(opp_table->clk);
opp_table->clk = ERR_PTR(-EINVAL);
@ -2269,9 +2257,6 @@ void dev_pm_opp_unregister_set_opp_helper(struct opp_table *opp_table)
if (unlikely(!opp_table))
return;
/* Make sure there are no concurrent readers while updating opp_table */
WARN_ON(!list_empty(&opp_table->opp_list));
opp_table->set_opp = NULL;
mutex_lock(&opp_table->lock);


@ -978,7 +978,7 @@ void pci_create_legacy_files(struct pci_bus *b)
b->legacy_mem->size = 1024*1024;
b->legacy_mem->attr.mode = 0600;
b->legacy_mem->mmap = pci_mmap_legacy_mem;
b->legacy_io->mapping = iomem_get_mapping();
b->legacy_mem->mapping = iomem_get_mapping();
pci_adjust_legacy_attr(b, pci_mmap_mem);
error = device_create_bin_file(&b->dev, b->legacy_mem);
if (error)


@ -1900,6 +1900,7 @@ static void quirk_ryzen_xhci_d3hot(struct pci_dev *dev)
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x15e0, quirk_ryzen_xhci_d3hot);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x15e1, quirk_ryzen_xhci_d3hot);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x1639, quirk_ryzen_xhci_d3hot);
#ifdef CONFIG_X86_IO_APIC
static int dmi_disable_ioapicreroute(const struct dmi_system_id *d)


@ -90,7 +90,8 @@ config PTP_1588_CLOCK_INES
config PTP_1588_CLOCK_PCH
tristate "Intel PCH EG20T as PTP clock"
depends on X86_32 || COMPILE_TEST
depends on HAS_IOMEM && NET
depends on HAS_IOMEM && PCI
depends on NET
imply PTP_1588_CLOCK
help
This driver adds support for using the PCH EG20T as a PTP


@ -453,8 +453,8 @@ static int initialize_controller(struct scsi_device *sdev,
if (!h->ctlr)
err = SCSI_DH_RES_TEMP_UNAVAIL;
else {
list_add_rcu(&h->node, &h->ctlr->dh_list);
h->sdev = sdev;
list_add_rcu(&h->node, &h->ctlr->dh_list);
}
spin_unlock(&list_lock);
err = SCSI_DH_OK;
@ -778,11 +778,11 @@ static void rdac_bus_detach( struct scsi_device *sdev )
spin_lock(&list_lock);
if (h->ctlr) {
list_del_rcu(&h->node);
h->sdev = NULL;
kref_put(&h->ctlr->kref, release_controller);
}
spin_unlock(&list_lock);
sdev->handler_data = NULL;
synchronize_rcu();
kfree(h);
}


@ -238,7 +238,7 @@ mraid_mm_get_adapter(mimd_t __user *umimd, int *rval)
mimd_t mimd;
uint32_t adapno;
int iterator;
bool is_found;
if (copy_from_user(&mimd, umimd, sizeof(mimd_t))) {
*rval = -EFAULT;
@ -254,12 +254,16 @@ mraid_mm_get_adapter(mimd_t __user *umimd, int *rval)
adapter = NULL;
iterator = 0;
is_found = false;
list_for_each_entry(adapter, &adapters_list_g, list) {
if (iterator++ == adapno) break;
if (iterator++ == adapno) {
is_found = true;
break;
}
}
if (!adapter) {
if (!is_found) {
*rval = -ENODEV;
return NULL;
}
@ -725,6 +729,7 @@ ioctl_done(uioc_t *kioc)
uint32_t adapno;
int iterator;
mraid_mmadp_t* adapter;
bool is_found;
/*
* When the kioc returns from driver, make sure it still doesn't
@ -747,19 +752,23 @@ ioctl_done(uioc_t *kioc)
iterator = 0;
adapter = NULL;
adapno = kioc->adapno;
is_found = false;
con_log(CL_ANN, ( KERN_WARNING "megaraid cmm: completed "
"ioctl that was timedout before\n"));
list_for_each_entry(adapter, &adapters_list_g, list) {
if (iterator++ == adapno) break;
if (iterator++ == adapno) {
is_found = true;
break;
}
}
kioc->timedout = 0;
if (adapter) {
if (is_found)
mraid_mm_dealloc_kioc( adapter, kioc );
}
}
else {
wake_up(&wait_q);
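
The megaraid fix is the classic list_for_each_entry() pitfall: after a full traversal the cursor is not NULL, it points at memory computed back from the list head, so "if (!adapter)" can never fire. The cure is an explicit found flag; a plain-C sketch of the fixed shape (note a plain pointer walk would go NULL by itself, which is exactly what the kernel macro does not do):

#include <stdbool.h>
#include <stdio.h>

struct adapter { int data; struct adapter *next; };

/* Search by position; report success out of band, never via the cursor. */
static struct adapter *get_adapter(struct adapter *list, int adapno)
{
        struct adapter *adapter;
        bool is_found = false;
        int iterator = 0;

        for (adapter = list; adapter; adapter = adapter->next) {
                if (iterator++ == adapno) {
                        is_found = true;
                        break;
                }
        }
        return is_found ? adapter : NULL;
}

int main(void)
{
        struct adapter a = { 42, NULL };

        printf("slot 5: %p\n", (void *)get_adapter(&a, 5)); /* NULL */
        printf("slot 0: %p\n", (void *)get_adapter(&a, 0)); /* &a   */
        return 0;
}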


@ -684,8 +684,7 @@ int pm8001_dev_found(struct domain_device *dev)
void pm8001_task_done(struct sas_task *task)
{
if (!del_timer(&task->slow_task->timer))
return;
del_timer(&task->slow_task->timer);
complete(&task->slow_task->completion);
}
@ -693,9 +692,14 @@ static void pm8001_tmf_timedout(struct timer_list *t)
{
struct sas_task_slow *slow = from_timer(slow, t, timer);
struct sas_task *task = slow->task;
unsigned long flags;
task->task_state_flags |= SAS_TASK_STATE_ABORTED;
complete(&task->slow_task->completion);
spin_lock_irqsave(&task->task_state_lock, flags);
if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
task->task_state_flags |= SAS_TASK_STATE_ABORTED;
complete(&task->slow_task->completion);
}
spin_unlock_irqrestore(&task->task_state_lock, flags);
}
#define PM8001_TASK_TIMEOUT 20
@ -748,13 +752,10 @@ static int pm8001_exec_internal_tmf_task(struct domain_device *dev,
}
res = -TMF_RESP_FUNC_FAILED;
/* Even TMF timed out, return direct. */
if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
pm8001_dbg(pm8001_ha, FAIL,
"TMF task[%x]timeout.\n",
tmf->tmf);
goto ex_err;
}
if (task->task_state_flags & SAS_TASK_STATE_ABORTED) {
pm8001_dbg(pm8001_ha, FAIL, "TMF task[%x]timeout.\n",
tmf->tmf);
goto ex_err;
}
if (task->task_status.resp == SAS_TASK_COMPLETE &&
@ -834,12 +835,9 @@ pm8001_exec_internal_task_abort(struct pm8001_hba_info *pm8001_ha,
wait_for_completion(&task->slow_task->completion);
res = TMF_RESP_FUNC_FAILED;
/* Even TMF timed out, return direct. */
if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
pm8001_dbg(pm8001_ha, FAIL,
"TMF task timeout.\n");
goto ex_err;
}
if (task->task_state_flags & SAS_TASK_STATE_ABORTED) {
pm8001_dbg(pm8001_ha, FAIL, "TMF task timeout.\n");
goto ex_err;
}
if (task->task_status.resp == SAS_TASK_COMPLETE &&
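
pm8001_tmf_timedout() now decides under task_state_lock: only when SAS_TASK_STATE_DONE is not yet set does it mark the task aborted and complete it, so the timeout path and the normal completion path can no longer both fire on one task. A pthread sketch of that race fix (fields simplified to two booleans):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct task {
        pthread_mutex_t state_lock;
        bool done;
        bool aborted;
};

/* Timeout and completion race; the lock lets exactly one side win. */
static void tmf_timedout(struct task *t)
{
        pthread_mutex_lock(&t->state_lock);
        if (!t->done) {
                t->aborted = true;
                /* complete(&task->slow_task->completion) goes here */
        }
        pthread_mutex_unlock(&t->state_lock);
}

static void task_done(struct task *t)
{
        pthread_mutex_lock(&t->state_lock);
        t->done = true;
        pthread_mutex_unlock(&t->state_lock);
}

int main(void)
{
        struct task t = { PTHREAD_MUTEX_INITIALIZER, false, false };

        task_done(&t);
        tmf_timedout(&t);       /* too late: done is set, no abort */
        printf("done=%d aborted=%d\n", t.done, t.aborted);
        return 0;
}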


@ -471,7 +471,8 @@ static struct scsi_target *scsi_alloc_target(struct device *parent,
error = shost->hostt->target_alloc(starget);
if(error) {
dev_printk(KERN_ERR, dev, "target allocation failed, error %d\n", error);
if (error != -ENXIO)
dev_err(dev, "target allocation failed, error %d\n", error);
/* don't want scsi_target_reap to do the final
* put because it will be under the host lock */
scsi_target_destroy(starget);


@ -807,11 +807,14 @@ store_state_field(struct device *dev, struct device_attribute *attr,
mutex_lock(&sdev->state_mutex);
ret = scsi_device_set_state(sdev, state);
/*
* If the device state changes to SDEV_RUNNING, we need to run
* the queue to avoid I/O hang.
* If the device state changes to SDEV_RUNNING, we need to
* rescan the device to revalidate it, and run the queue to
* avoid I/O hang.
*/
if (ret == 0 && state == SDEV_RUNNING)
if (ret == 0 && state == SDEV_RUNNING) {
scsi_rescan_device(dev);
blk_mq_run_hw_queues(sdev->request_queue, true);
}
mutex_unlock(&sdev->state_mutex);
return ret == 0 ? count : -EINVAL;


@ -66,7 +66,7 @@ int slim_alloc_txn_tid(struct slim_controller *ctrl, struct slim_msg_txn *txn)
int ret = 0;
spin_lock_irqsave(&ctrl->txn_lock, flags);
ret = idr_alloc_cyclic(&ctrl->tid_idr, txn, 0,
ret = idr_alloc_cyclic(&ctrl->tid_idr, txn, 1,
SLIM_MAX_TIDS, GFP_ATOMIC);
if (ret < 0) {
spin_unlock_irqrestore(&ctrl->txn_lock, flags);
@ -131,7 +131,8 @@ int slim_do_transfer(struct slim_controller *ctrl, struct slim_msg_txn *txn)
goto slim_xfer_err;
}
}
/* Initialize tid to invalid value */
txn->tid = 0;
need_tid = slim_tid_txn(txn->mt, txn->mc);
if (need_tid) {
@ -163,7 +164,7 @@ int slim_do_transfer(struct slim_controller *ctrl, struct slim_msg_txn *txn)
txn->mt, txn->mc, txn->la, ret);
slim_xfer_err:
if (!clk_pause_msg && (!need_tid || ret == -ETIMEDOUT)) {
if (!clk_pause_msg && (txn->tid == 0 || ret == -ETIMEDOUT)) {
/*
* remove runtime-pm vote if this was TX only, or
* if there was error during this transaction


@ -618,7 +618,7 @@ static void qcom_slim_ngd_rx(struct qcom_slim_ngd_ctrl *ctrl, u8 *buf)
(mc == SLIM_USR_MC_GENERIC_ACK &&
mt == SLIM_MSG_MT_SRC_REFERRED_USER)) {
slim_msg_response(&ctrl->ctrl, &buf[4], buf[3], len - 4);
pm_runtime_mark_last_busy(ctrl->dev);
pm_runtime_mark_last_busy(ctrl->ctrl.dev);
}
}
@ -1080,7 +1080,8 @@ static void qcom_slim_ngd_setup(struct qcom_slim_ngd_ctrl *ctrl)
{
u32 cfg = readl_relaxed(ctrl->ngd->base);
if (ctrl->state == QCOM_SLIM_NGD_CTRL_DOWN)
if (ctrl->state == QCOM_SLIM_NGD_CTRL_DOWN ||
ctrl->state == QCOM_SLIM_NGD_CTRL_ASLEEP)
qcom_slim_ngd_init_dma(ctrl);
/* By default enable message queues */
@ -1131,6 +1132,7 @@ static int qcom_slim_ngd_power_up(struct qcom_slim_ngd_ctrl *ctrl)
dev_info(ctrl->dev, "Subsys restart: ADSP active framer\n");
return 0;
}
qcom_slim_ngd_setup(ctrl);
return 0;
}
@ -1257,13 +1259,14 @@ static int qcom_slim_ngd_enable(struct qcom_slim_ngd_ctrl *ctrl, bool enable)
}
/* controller state should be in sync with framework state */
complete(&ctrl->qmi.qmi_comp);
if (!pm_runtime_enabled(ctrl->dev) ||
!pm_runtime_suspended(ctrl->dev))
qcom_slim_ngd_runtime_resume(ctrl->dev);
if (!pm_runtime_enabled(ctrl->ctrl.dev) ||
!pm_runtime_suspended(ctrl->ctrl.dev))
qcom_slim_ngd_runtime_resume(ctrl->ctrl.dev);
else
pm_runtime_resume(ctrl->dev);
pm_runtime_mark_last_busy(ctrl->dev);
pm_runtime_put(ctrl->dev);
pm_runtime_resume(ctrl->ctrl.dev);
pm_runtime_mark_last_busy(ctrl->ctrl.dev);
pm_runtime_put(ctrl->ctrl.dev);
ret = slim_register_controller(&ctrl->ctrl);
if (ret) {
@ -1389,7 +1392,7 @@ static int qcom_slim_ngd_ssr_pdr_notify(struct qcom_slim_ngd_ctrl *ctrl,
/* Make sure the last dma xfer is finished */
mutex_lock(&ctrl->tx_lock);
if (ctrl->state != QCOM_SLIM_NGD_CTRL_DOWN) {
pm_runtime_get_noresume(ctrl->dev);
pm_runtime_get_noresume(ctrl->ctrl.dev);
ctrl->state = QCOM_SLIM_NGD_CTRL_DOWN;
qcom_slim_ngd_down(ctrl);
qcom_slim_ngd_exit_dma(ctrl);
@ -1617,6 +1620,7 @@ static int __maybe_unused qcom_slim_ngd_runtime_suspend(struct device *dev)
struct qcom_slim_ngd_ctrl *ctrl = dev_get_drvdata(dev);
int ret = 0;
qcom_slim_ngd_exit_dma(ctrl);
if (!ctrl->qmi.handle)
return 0;


@ -23,6 +23,7 @@
#include <linux/signal.h>
#include <linux/device.h>
#include <linux/spinlock.h>
#include <linux/platform_device.h>
#include <asm/irq.h>
#include <asm/io.h>
#include <soc/fsl/qe/qe.h>
@ -53,8 +54,8 @@ struct qe_ic {
struct irq_chip hc_irq;
/* VIRQ numbers of QE high/low irqs */
unsigned int virq_high;
unsigned int virq_low;
int virq_high;
int virq_low;
};
/*
@ -404,42 +405,40 @@ static void qe_ic_cascade_muxed_mpic(struct irq_desc *desc)
chip->irq_eoi(&desc->irq_data);
}
static void __init qe_ic_init(struct device_node *node)
static int qe_ic_init(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
void (*low_handler)(struct irq_desc *desc);
void (*high_handler)(struct irq_desc *desc);
struct qe_ic *qe_ic;
struct resource res;
u32 ret;
struct resource *res;
struct device_node *node = pdev->dev.of_node;
ret = of_address_to_resource(node, 0, &res);
if (ret)
return;
qe_ic = kzalloc(sizeof(*qe_ic), GFP_KERNEL);
if (qe_ic == NULL)
return;
qe_ic->irqhost = irq_domain_add_linear(node, NR_QE_IC_INTS,
&qe_ic_host_ops, qe_ic);
if (qe_ic->irqhost == NULL) {
kfree(qe_ic);
return;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (res == NULL) {
dev_err(dev, "no memory resource defined\n");
return -ENODEV;
}
qe_ic->regs = ioremap(res.start, resource_size(&res));
qe_ic = devm_kzalloc(dev, sizeof(*qe_ic), GFP_KERNEL);
if (qe_ic == NULL)
return -ENOMEM;
qe_ic->regs = devm_ioremap(dev, res->start, resource_size(res));
if (qe_ic->regs == NULL) {
dev_err(dev, "failed to ioremap() registers\n");
return -ENODEV;
}
qe_ic->hc_irq = qe_ic_irq_chip;
qe_ic->virq_high = irq_of_parse_and_map(node, 0);
qe_ic->virq_low = irq_of_parse_and_map(node, 1);
qe_ic->virq_high = platform_get_irq(pdev, 0);
qe_ic->virq_low = platform_get_irq(pdev, 1);
if (!qe_ic->virq_low) {
printk(KERN_ERR "Failed to map QE_IC low IRQ\n");
kfree(qe_ic);
return;
}
if (qe_ic->virq_high != qe_ic->virq_low) {
if (qe_ic->virq_low <= 0)
return -ENODEV;
if (qe_ic->virq_high > 0 && qe_ic->virq_high != qe_ic->virq_low) {
low_handler = qe_ic_cascade_low;
high_handler = qe_ic_cascade_high;
} else {
@ -447,29 +446,42 @@ static void __init qe_ic_init(struct device_node *node)
high_handler = NULL;
}
qe_ic->irqhost = irq_domain_add_linear(node, NR_QE_IC_INTS,
&qe_ic_host_ops, qe_ic);
if (qe_ic->irqhost == NULL) {
dev_err(dev, "failed to add irq domain\n");
return -ENODEV;
}
qe_ic_write(qe_ic->regs, QEIC_CICR, 0);
irq_set_handler_data(qe_ic->virq_low, qe_ic);
irq_set_chained_handler(qe_ic->virq_low, low_handler);
if (qe_ic->virq_high && qe_ic->virq_high != qe_ic->virq_low) {
if (high_handler) {
irq_set_handler_data(qe_ic->virq_high, qe_ic);
irq_set_chained_handler(qe_ic->virq_high, high_handler);
}
return 0;
}
static const struct of_device_id qe_ic_ids[] = {
{ .compatible = "fsl,qe-ic"},
{ .type = "qeic"},
{},
};
static struct platform_driver qe_ic_driver =
{
.driver = {
.name = "qe-ic",
.of_match_table = qe_ic_ids,
},
.probe = qe_ic_init,
};
static int __init qe_ic_of_init(void)
{
struct device_node *np;
np = of_find_compatible_node(NULL, NULL, "fsl,qe-ic");
if (!np) {
np = of_find_node_by_type(NULL, "qeic");
if (!np)
return -ENODEV;
}
qe_ic_init(np);
of_node_put(np);
platform_driver_register(&qe_ic_driver);
return 0;
}
subsys_initcall(qe_ic_of_init);


@ -325,7 +325,15 @@ static int cqspi_set_protocol(struct cqspi_flash_pdata *f_pdata,
f_pdata->inst_width = CQSPI_INST_TYPE_SINGLE;
f_pdata->addr_width = CQSPI_INST_TYPE_SINGLE;
f_pdata->data_width = CQSPI_INST_TYPE_SINGLE;
f_pdata->dtr = op->data.dtr && op->cmd.dtr && op->addr.dtr;
/*
* For an op to be DTR, cmd phase along with every other non-empty
* phase should have dtr field set to 1. If an op phase has zero
* nbytes, ignore its dtr field; otherwise, check its dtr field.
*/
f_pdata->dtr = op->cmd.dtr &&
(!op->addr.nbytes || op->addr.dtr) &&
(!op->data.nbytes || op->data.dtr);
switch (op->data.buswidth) {
case 0:
@ -1227,8 +1235,15 @@ static bool cqspi_supports_mem_op(struct spi_mem *mem,
{
bool all_true, all_false;
all_true = op->cmd.dtr && op->addr.dtr && op->dummy.dtr &&
op->data.dtr;
/*
* op->dummy.dtr is required for converting nbytes into ncycles.
* Also, don't check the dtr field of the op phase having zero nbytes.
*/
all_true = op->cmd.dtr &&
(!op->addr.nbytes || op->addr.dtr) &&
(!op->dummy.nbytes || op->dummy.dtr) &&
(!op->data.nbytes || op->data.dtr);
all_false = !op->cmd.dtr && !op->addr.dtr && !op->dummy.dtr &&
!op->data.dtr;
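
Both cqspi hunks encode the same rule: a phase that puts no bytes on the wire has no transfer rate to agree on, so its dtr flag is ignored, and every non-empty phase must opt in before the whole op counts as DTR. Reduced to a standalone predicate (struct layout simplified from spi-mem's):

#include <stdbool.h>
#include <stdio.h>

struct op_phase { unsigned int nbytes; bool dtr; };

/* The cmd phase always exists; the others only vote when non-empty. */
static bool op_is_dtr(struct op_phase cmd, struct op_phase addr,
                      struct op_phase data)
{
        return cmd.dtr &&
               (!addr.nbytes || addr.dtr) &&
               (!data.nbytes || data.dtr);
}

int main(void)
{
        struct op_phase cmd  = { 1, true };
        struct op_phase addr = { 0, false };    /* empty: dtr ignored */
        struct op_phase data = { 4, true };

        printf("dtr=%d\n", op_is_dtr(cmd, addr, data));  /* 1 */
        return 0;
}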


@ -167,10 +167,17 @@ err_put_ctlr:
return ret;
}
static const struct spi_device_id spi_mux_id[] = {
{ "spi-mux" },
{ }
};
MODULE_DEVICE_TABLE(spi, spi_mux_id);
static const struct of_device_id spi_mux_of_match[] = {
{ .compatible = "spi-mux" },
{ }
};
MODULE_DEVICE_TABLE(of, spi_mux_of_match);
static struct spi_driver spi_mux_driver = {
.probe = spi_mux_probe,
@ -178,6 +185,7 @@ static struct spi_driver spi_mux_driver = {
.name = "spi-mux",
.of_match_table = spi_mux_of_match,
},
.id_table = spi_mux_id,
};
module_spi_driver(spi_mux_driver);


@ -1133,7 +1133,7 @@ static int do_proc_control(struct usb_dev_state *ps,
"wIndex=%04x wLength=%04x\n",
ctrl->bRequestType, ctrl->bRequest, ctrl->wValue,
ctrl->wIndex, ctrl->wLength);
if (ctrl->bRequestType & 0x80) {
if ((ctrl->bRequestType & USB_DIR_IN) && ctrl->wLength) {
pipe = usb_rcvctrlpipe(dev, 0);
snoop_urb(dev, NULL, pipe, ctrl->wLength, tmo, SUBMIT, NULL, 0);


@ -783,6 +783,9 @@ int usb_get_descriptor(struct usb_device *dev, unsigned char type,
int i;
int result;
if (size <= 0) /* No point in asking for no data */
return -EINVAL;
memset(buf, 0, size); /* Make sure we parse really received data */
for (i = 0; i < 3; ++i) {
@ -832,6 +835,9 @@ static int usb_get_string(struct usb_device *dev, unsigned short langid,
int i;
int result;
if (size <= 0) /* No point in asking for no data */
return -EINVAL;
for (i = 0; i < 3; ++i) {
/* retry on length 0 or stall; some devices are flakey */
result = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0),


@ -1709,6 +1709,10 @@ static int tcpm_pd_svdm(struct tcpm_port *port, struct typec_altmode *adev,
return rlen;
}
static void tcpm_pd_handle_msg(struct tcpm_port *port,
enum pd_msg_request message,
enum tcpm_ams ams);
static void tcpm_handle_vdm_request(struct tcpm_port *port,
const __le32 *payload, int cnt)
{
@ -1736,11 +1740,11 @@ static void tcpm_handle_vdm_request(struct tcpm_port *port,
port->vdm_state = VDM_STATE_DONE;
}
if (PD_VDO_SVDM(p[0])) {
if (PD_VDO_SVDM(p[0]) && (adev || tcpm_vdm_ams(port) || port->nr_snk_vdo)) {
rlen = tcpm_pd_svdm(port, adev, p, cnt, response, &adev_action);
} else {
if (port->negotiated_rev >= PD_REV30)
tcpm_queue_message(port, PD_MSG_CTRL_NOT_SUPP);
tcpm_pd_handle_msg(port, PD_MSG_CTRL_NOT_SUPP, NONE_AMS);
}
/*
@ -2443,10 +2447,7 @@ static void tcpm_pd_data_request(struct tcpm_port *port,
NONE_AMS);
break;
case PD_DATA_VENDOR_DEF:
if (tcpm_vdm_ams(port) || port->nr_snk_vdo)
tcpm_handle_vdm_request(port, msg->payload, cnt);
else if (port->negotiated_rev > PD_REV20)
tcpm_pd_handle_msg(port, PD_MSG_CTRL_NOT_SUPP, NONE_AMS);
tcpm_handle_vdm_request(port, msg->payload, cnt);
break;
case PD_DATA_BIST:
port->bist_request = le32_to_cpu(msg->payload[0]);


@ -477,9 +477,9 @@ static int ifcvf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
adapter = vdpa_alloc_device(struct ifcvf_adapter, vdpa,
dev, &ifc_vdpa_ops, NULL);
if (adapter == NULL) {
if (IS_ERR(adapter)) {
IFCVF_ERR(pdev, "Failed to allocate vDPA structure");
return -ENOMEM;
return PTR_ERR(adapter);
}
pci_set_master(pdev);
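
The three vdpa hunks (ifcvf, vdpa_sim, vp_vdpa) fix the same bug: vdpa_alloc_device() reports failure as an ERR_PTR-encoded errno, never as NULL, so a NULL check can never fire. A userspace re-creation of the kernel's ERR_PTR/IS_ERR convention shows why the check had to change:

#include <stdio.h>

#define MAX_ERRNO 4095
#define ENOMEM 12

/* The error travels inside the pointer; NULL is never returned. */
static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

static void *alloc_device(int fail)
{
        static int object;

        return fail ? ERR_PTR(-ENOMEM) : (void *)&object;
}

int main(void)
{
        void *dev = alloc_device(1);

        if (!dev)
                printf("NULL check: never reached\n");
        if (IS_ERR(dev))                /* the check that actually works */
                printf("alloc failed: %ld\n", PTR_ERR(dev));
        return 0;
}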


@ -454,11 +454,6 @@ out:
mutex_unlock(&mr->mkey_mtx);
}
static bool map_empty(struct vhost_iotlb *iotlb)
{
return !vhost_iotlb_itree_first(iotlb, 0, U64_MAX);
}
int mlx5_vdpa_handle_set_map(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb,
bool *change_map)
{
@ -466,10 +461,6 @@ int mlx5_vdpa_handle_set_map(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *io
int err = 0;
*change_map = false;
if (map_empty(iotlb)) {
mlx5_vdpa_destroy_mr(mvdev);
return 0;
}
mutex_lock(&mr->mkey_mtx);
if (mr->initialized) {
mlx5_vdpa_info(mvdev, "memory map update\n");


@ -752,12 +752,12 @@ static int get_queue_type(struct mlx5_vdpa_net *ndev)
type_mask = MLX5_CAP_DEV_VDPA_EMULATION(ndev->mvdev.mdev, virtio_queue_type);
/* prefer split queue */
if (type_mask & MLX5_VIRTIO_EMULATION_CAP_VIRTIO_QUEUE_TYPE_PACKED)
return MLX5_VIRTIO_EMULATION_VIRTIO_QUEUE_TYPE_PACKED;
if (type_mask & MLX5_VIRTIO_EMULATION_CAP_VIRTIO_QUEUE_TYPE_SPLIT)
return MLX5_VIRTIO_EMULATION_VIRTIO_QUEUE_TYPE_SPLIT;
WARN_ON(!(type_mask & MLX5_VIRTIO_EMULATION_CAP_VIRTIO_QUEUE_TYPE_SPLIT));
WARN_ON(!(type_mask & MLX5_VIRTIO_EMULATION_CAP_VIRTIO_QUEUE_TYPE_PACKED));
return MLX5_VIRTIO_EMULATION_VIRTIO_QUEUE_TYPE_SPLIT;
return MLX5_VIRTIO_EMULATION_VIRTIO_QUEUE_TYPE_PACKED;
}
static bool vq_is_tx(u16 idx)
@ -2010,6 +2010,12 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name)
return -ENOSPC;
mdev = mgtdev->madev->mdev;
if (!(MLX5_CAP_DEV_VDPA_EMULATION(mdev, virtio_queue_type) &
MLX5_VIRTIO_EMULATION_CAP_VIRTIO_QUEUE_TYPE_SPLIT)) {
dev_warn(mdev->device, "missing support for split virtqueues\n");
return -EOPNOTSUPP;
}
/* we save one virtqueue for control virtqueue should we require it */
max_vqs = MLX5_CAP_DEV_VDPA_EMULATION(mdev, max_num_virtio_queues);
max_vqs = min_t(u32, max_vqs, MLX5_MAX_SUPPORTED_VQS);


@ -251,8 +251,10 @@ struct vdpasim *vdpasim_create(struct vdpasim_dev_attr *dev_attr)
vdpasim = vdpa_alloc_device(struct vdpasim, vdpa, NULL, ops,
dev_attr->name);
if (!vdpasim)
if (IS_ERR(vdpasim)) {
ret = PTR_ERR(vdpasim);
goto err_alloc;
}
vdpasim->dev_attr = *dev_attr;
INIT_WORK(&vdpasim->work, dev_attr->work_fn);


@ -400,9 +400,9 @@ static int vp_vdpa_probe(struct pci_dev *pdev, const struct pci_device_id *id)
vp_vdpa = vdpa_alloc_device(struct vp_vdpa, vdpa,
dev, &vp_vdpa_ops, NULL);
if (vp_vdpa == NULL) {
if (IS_ERR(vp_vdpa)) {
dev_err(dev, "vp_vdpa: Failed to allocate vDPA structure\n");
return -ENOMEM;
return PTR_ERR(vp_vdpa);
}
mdev = &vp_vdpa->mdev;


@ -614,7 +614,8 @@ static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
long pinned;
int ret = 0;
if (msg->iova < v->range.first ||
if (msg->iova < v->range.first || !msg->size ||
msg->iova > U64_MAX - msg->size + 1 ||
msg->iova + msg->size - 1 > v->range.last)
return -EINVAL;


@ -735,10 +735,16 @@ static bool log_access_ok(void __user *log_base, u64 addr, unsigned long sz)
(sz + VHOST_PAGE_SIZE * 8 - 1) / VHOST_PAGE_SIZE / 8);
}
/* Make sure 64 bit math will not overflow. */
static bool vhost_overflow(u64 uaddr, u64 size)
{
/* Make sure 64 bit math will not overflow. */
return uaddr > ULONG_MAX || size > ULONG_MAX || uaddr > ULONG_MAX - size;
if (uaddr > ULONG_MAX || size > ULONG_MAX)
return true;
if (!size)
return false;
return uaddr > ULONG_MAX - size + 1;
}
/* Caller should have vq mutex and device mutex. */


@ -355,6 +355,7 @@ int register_virtio_device(struct virtio_device *dev)
virtio_add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE);
INIT_LIST_HEAD(&dev->vqs);
spin_lock_init(&dev->vqs_list_lock);
/*
* device_add() causes the bus infrastructure to look for a matching
