Merge tag 'v5.15.185' into v5.15/standard/base

This is the 5.15.185 stable release

# -----BEGIN PGP SIGNATURE-----
#
# iQIzBAABCgAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAmhAPlcACgkQONu9yGCS
# aT6M0w//RJzf9Lwvdobnq/yyl7YlthZQ2FDRIAvLRsJi6HF7kxgNNjLGHeYE31H/
# PhUz+7jzu/Vl47urn/QozINTIG32krGrsiDi+gmF2wDBFmqZSjmsWKEHWNuM3tpl
# R6LkviNoA1AJlCfO30sv9BTmE/97zJGj+c1DJKWcbLOE3ai0KXzS1YbPCzjm3FxY
# 6s6H1v4jNzMCNecjyKAdjkAoZQzyLttyjhIV+nTT6pS57Cps79SjFe8YE4yzf9xB
# +8B/zYT8fEvsg0TqNZMwjujrIwyY0Vk2nCKOpZ2siXpDPsBS/5FSLl1FxL2Rxr24
# 0qC8vGxa8BivLLsrTg3rfOe3RE96oM2XLfhyzYGlV9qduFjDCfW4yoqcZmLF0eUV
# sq925GCT/Xbyx8AhbCRfUwlDh8iprns6Yx5Jo9XlA7HmZdjN8awCEeocgW7oXI19
# ewzQr54JnLfaGMDRxjxVWBNip9TGwQDddxeAGjUkjlYmigi7gR82+P/0e0wweok2
# ATXxAPc2RuedMyd4U1FIA969q4AVdAbz9mdxAKv4i1I5g8DE6yUtutoDVUylbBgV
# gialH6g0NHgOUm6cuCe9apgEff2DIJ4HD14IYCMmup7vfnwD0YITVPntQYCtM/IX
# HnuA20/LO5NIjXW3JaV0hXjjptFzD3f//Zw9iormHU6LSThLcwM=
# =8ouQ
# -----END PGP SIGNATURE-----
# gpg: Signature made Wed 04 Jun 2025 08:38:47 AM EDT
# gpg:                using RSA key 647F28654894E3BD457199BE38DBBDC86092693E
# gpg: Can't check signature: No public key
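The missing-key notice above only means the stable signing key was not in the local keyring when the merge was created; the tag signature itself is intact. A minimal verification sketch, assuming the fingerprint shown in the gpg output above and a reachable keyserver (the keyserver choice is an assumption, not part of the original commit):

    # Fetch the stable release signing key reported by gpg above.
    gpg --keyserver hkps://keyserver.ubuntu.com \
        --recv-keys 647F28654894E3BD457199BE38DBBDC86092693E

    # Re-check the signed tag from a clone of the stable tree.
    git verify-tag v5.15.185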
commit 118b2e7708
Bruce Ashfield, 2025-06-06 11:41:44 -04:00
241 changed files with 2046 additions and 881 deletions

View File

@ -5445,6 +5445,8 @@
Selecting 'on' will also enable the mitigation
against user space to user space task attacks.
Selecting specific mitigation does not force enable
user mitigations.
Selecting 'off' will disable both the kernel and
the user space protections.

View File

@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 5
PATCHLEVEL = 15
SUBLEVEL = 184
SUBLEVEL = 185
EXTRAVERSION =
NAME = Trick or Treat
@ -814,6 +814,18 @@ KBUILD_CFLAGS += -Wno-gnu
# source of a reference will be _MergedGlobals and not on of the whitelisted names.
# See modpost pattern 2
KBUILD_CFLAGS += -mno-global-merge
# Clang may emit a warning when a const variable, such as the dummy variables
# in typecheck(), or const member of an aggregate type are not initialized,
# which can result in unexpected behavior. However, in many audited cases of
# the "field" variant of the warning, this is intentional because the field is
# never used within a particular call path, the field is within a union with
# other non-const members, or the containing object is not const so the field
# can be modified via memcpy() / memset(). While the variable warning also gets
# disabled with this same switch, there should not be too much coverage lost
# because -Wuninitialized will still flag when an uninitialized const variable
# is used.
KBUILD_CFLAGS += $(call cc-disable-warning, default-const-init-unsafe)
else
# Warn about unmarked fall-throughs in switch statement.

View File

@ -126,7 +126,7 @@
reg = <0x54400000 0x00040000>;
clocks = <&tegra_car TEGRA114_CLK_DSIB>,
<&tegra_car TEGRA114_CLK_DSIBLP>,
<&tegra_car TEGRA114_CLK_PLL_D2_OUT0>;
<&tegra_car TEGRA114_CLK_PLL_D_OUT0>;
clock-names = "dsi", "lp", "parent";
resets = <&tegra_car 82>;
reset-names = "dsi";

View File

@ -350,11 +350,12 @@ extern u32 at91_pm_suspend_in_sram_sz;
static int at91_suspend_finish(unsigned long val)
{
unsigned char modified_gray_code[] = {
0x00, 0x01, 0x02, 0x03, 0x06, 0x07, 0x04, 0x05, 0x0c, 0x0d,
0x0e, 0x0f, 0x0a, 0x0b, 0x08, 0x09, 0x18, 0x19, 0x1a, 0x1b,
0x1e, 0x1f, 0x1c, 0x1d, 0x14, 0x15, 0x16, 0x17, 0x12, 0x13,
0x10, 0x11,
/* SYNOPSYS workaround to fix a bug in the calibration logic */
unsigned char modified_fix_code[] = {
0x00, 0x01, 0x01, 0x06, 0x07, 0x0c, 0x06, 0x07, 0x0b, 0x18,
0x0a, 0x0b, 0x0c, 0x0d, 0x0d, 0x0a, 0x13, 0x13, 0x12, 0x13,
0x14, 0x15, 0x15, 0x12, 0x18, 0x19, 0x19, 0x1e, 0x1f, 0x14,
0x1e, 0x1f,
};
unsigned int tmp, index;
int i;
@ -365,25 +366,25 @@ static int at91_suspend_finish(unsigned long val)
* restore the ZQ0SR0 with the value saved here. But the
* calibration is buggy and restoring some values from ZQ0SR0
* is forbidden and risky thus we need to provide processed
* values for these (modified gray code values).
* values for these.
*/
tmp = readl(soc_pm.data.ramc_phy + DDR3PHY_ZQ0SR0);
/* Store pull-down output impedance select. */
index = (tmp >> DDR3PHY_ZQ0SR0_PDO_OFF) & 0x1f;
soc_pm.bu->ddr_phy_calibration[0] = modified_gray_code[index];
soc_pm.bu->ddr_phy_calibration[0] = modified_fix_code[index] << DDR3PHY_ZQ0SR0_PDO_OFF;
/* Store pull-up output impedance select. */
index = (tmp >> DDR3PHY_ZQ0SR0_PUO_OFF) & 0x1f;
soc_pm.bu->ddr_phy_calibration[0] |= modified_gray_code[index];
soc_pm.bu->ddr_phy_calibration[0] |= modified_fix_code[index] << DDR3PHY_ZQ0SR0_PUO_OFF;
/* Store pull-down on-die termination impedance select. */
index = (tmp >> DDR3PHY_ZQ0SR0_PDODT_OFF) & 0x1f;
soc_pm.bu->ddr_phy_calibration[0] |= modified_gray_code[index];
soc_pm.bu->ddr_phy_calibration[0] |= modified_fix_code[index] << DDR3PHY_ZQ0SR0_PDODT_OFF;
/* Store pull-up on-die termination impedance select. */
index = (tmp >> DDR3PHY_ZQ0SRO_PUODT_OFF) & 0x1f;
soc_pm.bu->ddr_phy_calibration[0] |= modified_gray_code[index];
soc_pm.bu->ddr_phy_calibration[0] |= modified_fix_code[index] << DDR3PHY_ZQ0SRO_PUODT_OFF;
/*
* The 1st 8 words of memory might get corrupted in the process

View File

@ -150,28 +150,12 @@
vcc-pg-supply = <&reg_aldo1>;
};
&r_ir {
linux,rc-map-name = "rc-beelink-gs1";
status = "okay";
};
&r_pio {
/*
* FIXME: We can't add that supply for now since it would
* create a circular dependency between pinctrl, the regulator
* and the RSB Bus.
*
* vcc-pl-supply = <&reg_aldo1>;
*/
vcc-pm-supply = <&reg_aldo1>;
};
&r_rsb {
&r_i2c {
status = "okay";
axp805: pmic@745 {
axp805: pmic@36 {
compatible = "x-powers,axp805", "x-powers,axp806";
reg = <0x745>;
reg = <0x36>;
interrupt-parent = <&r_intc>;
interrupts = <GIC_SPI 96 IRQ_TYPE_LEVEL_LOW>;
interrupt-controller;
@ -289,6 +273,22 @@
};
};
&r_ir {
linux,rc-map-name = "rc-beelink-gs1";
status = "okay";
};
&r_pio {
/*
* PL0 and PL1 are used for PMIC I2C
* don't enable the pl-supply else
* it will fail at boot
*
* vcc-pl-supply = <&reg_aldo1>;
*/
vcc-pm-supply = <&reg_aldo1>;
};
&spdif {
status = "okay";
};

View File

@ -175,16 +175,12 @@
vcc-pg-supply = <&reg_vcc_wifi_io>;
};
&r_ir {
status = "okay";
};
&r_rsb {
&r_i2c {
status = "okay";
axp805: pmic@745 {
axp805: pmic@36 {
compatible = "x-powers,axp805", "x-powers,axp806";
reg = <0x745>;
reg = <0x36>;
interrupt-parent = <&r_intc>;
interrupts = <GIC_SPI 96 IRQ_TYPE_LEVEL_LOW>;
interrupt-controller;
@ -295,6 +291,10 @@
};
};
&r_ir {
status = "okay";
};
&rtc {
clocks = <&ext_osc32k>;
};

View File

@ -112,20 +112,12 @@
vcc-pg-supply = <&reg_aldo1>;
};
&r_ir {
status = "okay";
};
&r_pio {
vcc-pm-supply = <&reg_bldo3>;
};
&r_rsb {
&r_i2c {
status = "okay";
axp805: pmic@745 {
axp805: pmic@36 {
compatible = "x-powers,axp805", "x-powers,axp806";
reg = <0x745>;
reg = <0x36>;
interrupt-parent = <&r_intc>;
interrupts = <GIC_SPI 96 IRQ_TYPE_LEVEL_LOW>;
interrupt-controller;
@ -240,6 +232,14 @@
};
};
&r_ir {
status = "okay";
};
&r_pio {
vcc-pm-supply = <&reg_bldo3>;
};
&rtc {
clocks = <&ext_osc32k>;
};

View File

@ -1638,7 +1638,7 @@
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
regulator-always-on;
gpio = <&exp1 14 GPIO_ACTIVE_HIGH>;
gpio = <&exp1 9 GPIO_ACTIVE_HIGH>;
enable-active-high;
vin-supply = <&vdd_1v8>;
};

View File

@ -240,7 +240,7 @@
no-map;
};
pil_camera_mem: mmeory@85200000 {
pil_camera_mem: memory@85200000 {
reg = <0x0 0x85200000 0x0 0x500000>;
no-map;
};

View File

@ -623,7 +623,8 @@ static inline unsigned long pmd_page_vaddr(pmd_t pmd)
pr_err("%s:%d: bad pmd %016llx.\n", __FILE__, __LINE__, pmd_val(e))
#define pud_none(pud) (!pud_val(pud))
#define pud_bad(pud) (!pud_table(pud))
#define pud_bad(pud) ((pud_val(pud) & PUD_TYPE_MASK) != \
PUD_TYPE_TABLE)
#define pud_present(pud) pte_present(pud_pte(pud))
#define pud_leaf(pud) (pud_present(pud) && !pud_table(pud))
#define pud_valid(pud) pte_valid(pud_pte(pud))

View File

@ -87,4 +87,20 @@ struct dyn_arch_ftrace {
#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* __ASSEMBLY__ */
#endif /* CONFIG_FUNCTION_TRACER */
#ifdef CONFIG_FTRACE_SYSCALLS
#ifndef __ASSEMBLY__
/*
* Some syscall entry functions on mips start with "__sys_" (fork and clone,
* for instance). We should also match the sys_ variant with those.
*/
#define ARCH_HAS_SYSCALL_MATCH_SYM_NAME
static inline bool arch_syscall_match_sym_name(const char *sym,
const char *name)
{
return !strcmp(sym, name) ||
(!strncmp(sym, "__sys_", 6) && !strcmp(sym + 6, name + 4));
}
#endif /* __ASSEMBLY__ */
#endif /* CONFIG_FTRACE_SYSCALLS */
#endif /* _ASM_MIPS_FTRACE_H */

View File

@ -56,10 +56,7 @@ static DEFINE_PER_CPU_ALIGNED(u32*, ready_count);
/* Indicates online CPUs coupled with the current CPU */
static DEFINE_PER_CPU_ALIGNED(cpumask_t, online_coupled);
/*
* Used to synchronize entry to deep idle states. Actually per-core rather
* than per-CPU.
*/
/* Used to synchronize entry to deep idle states */
static DEFINE_PER_CPU_ALIGNED(atomic_t, pm_barrier);
/* Saved CPU state across the CPS_PM_POWER_GATED state */
@ -118,9 +115,10 @@ int cps_pm_enter_state(enum cps_pm_state state)
cps_nc_entry_fn entry;
struct core_boot_config *core_cfg;
struct vpe_boot_config *vpe_cfg;
atomic_t *barrier;
/* Check that there is an entry function for this state */
entry = per_cpu(nc_asm_enter, core)[state];
entry = per_cpu(nc_asm_enter, cpu)[state];
if (!entry)
return -EINVAL;
@ -156,7 +154,7 @@ int cps_pm_enter_state(enum cps_pm_state state)
smp_mb__after_atomic();
/* Create a non-coherent mapping of the core ready_count */
core_ready_count = per_cpu(ready_count, core);
core_ready_count = per_cpu(ready_count, cpu);
nc_addr = kmap_noncoherent(virt_to_page(core_ready_count),
(unsigned long)core_ready_count);
nc_addr += ((unsigned long)core_ready_count & ~PAGE_MASK);
@ -164,7 +162,8 @@ int cps_pm_enter_state(enum cps_pm_state state)
/* Ensure ready_count is zero-initialised before the assembly runs */
WRITE_ONCE(*nc_core_ready_count, 0);
coupled_barrier(&per_cpu(pm_barrier, core), online);
barrier = &per_cpu(pm_barrier, cpumask_first(&cpu_sibling_map[cpu]));
coupled_barrier(barrier, online);
/* Run the generated entry code */
left = entry(online, nc_core_ready_count);
@ -635,12 +634,14 @@ out_err:
static int cps_pm_online_cpu(unsigned int cpu)
{
enum cps_pm_state state;
unsigned core = cpu_core(&cpu_data[cpu]);
unsigned int sibling, core;
void *entry_fn, *core_rc;
enum cps_pm_state state;
core = cpu_core(&cpu_data[cpu]);
for (state = CPS_PM_NC_WAIT; state < CPS_PM_STATE_COUNT; state++) {
if (per_cpu(nc_asm_enter, core)[state])
if (per_cpu(nc_asm_enter, cpu)[state])
continue;
if (!test_bit(state, state_support))
continue;
@ -652,16 +653,19 @@ static int cps_pm_online_cpu(unsigned int cpu)
clear_bit(state, state_support);
}
per_cpu(nc_asm_enter, core)[state] = entry_fn;
for_each_cpu(sibling, &cpu_sibling_map[cpu])
per_cpu(nc_asm_enter, sibling)[state] = entry_fn;
}
if (!per_cpu(ready_count, core)) {
if (!per_cpu(ready_count, cpu)) {
core_rc = kmalloc(sizeof(u32), GFP_KERNEL);
if (!core_rc) {
pr_err("Failed allocate core %u ready_count\n", core);
return -ENOMEM;
}
per_cpu(ready_count, core) = core_rc;
for_each_cpu(sibling, &cpu_sibling_map[cpu])
per_cpu(ready_count, sibling) = core_rc;
}
return 0;

View File

@ -2978,11 +2978,11 @@ static void __init fixup_device_tree_pmac(void)
char type[8];
phandle node;
// Some pmacs are missing #size-cells on escc nodes
// Some pmacs are missing #size-cells on escc or i2s nodes
for (node = 0; prom_next_node(&node); ) {
type[0] = '\0';
prom_getprop(node, "device_type", type, sizeof(type));
if (prom_strcmp(type, "escc"))
if (prom_strcmp(type, "escc") && prom_strcmp(type, "i2s"))
continue;
if (prom_getproplen(node, "#size-cells") != PROM_ERROR)

View File

@ -2183,6 +2183,10 @@ static struct pmu power_pmu = {
#define PERF_SAMPLE_ADDR_TYPE (PERF_SAMPLE_ADDR | \
PERF_SAMPLE_PHYS_ADDR | \
PERF_SAMPLE_DATA_PAGE_SIZE)
#define SIER_TYPE_SHIFT 15
#define SIER_TYPE_MASK (0x7ull << SIER_TYPE_SHIFT)
/*
* A counter has overflowed; update its count and record
* things if requested. Note that interrupts are hard-disabled
@ -2251,6 +2255,22 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
is_kernel_addr(mfspr(SPRN_SIAR)))
record = 0;
/*
* SIER[46-48] presents instruction type of the sampled instruction.
* In ISA v3.0 and before values "0" and "7" are considered reserved.
* In ISA v3.1, value "7" has been used to indicate "larx/stcx".
* Drop the sample if "type" has reserved values for this field with a
* ISA version check.
*/
if (event->attr.sample_type & PERF_SAMPLE_DATA_SRC &&
ppmu->get_mem_data_src) {
val = (regs->dar & SIER_TYPE_MASK) >> SIER_TYPE_SHIFT;
if (val == 0 || (val == 7 && !cpu_has_feature(CPU_FTR_ARCH_31))) {
record = 0;
atomic64_inc(&event->lost_samples);
}
}
/*
* Finally record data if requested.
*/

View File

@ -275,8 +275,10 @@ void isa207_get_mem_data_src(union perf_mem_data_src *dsrc, u32 flags,
sier = mfspr(SPRN_SIER);
val = (sier & ISA207_SIER_TYPE_MASK) >> ISA207_SIER_TYPE_SHIFT;
if (val != 1 && val != 2 && !(val == 7 && cpu_has_feature(CPU_FTR_ARCH_31)))
if (val != 1 && val != 2 && !(val == 7 && cpu_has_feature(CPU_FTR_ARCH_31))) {
dsrc->val = 0;
return;
}
idx = (sier & ISA207_SIER_LDST_MASK) >> ISA207_SIER_LDST_SHIFT;
sub_idx = (sier & ISA207_SIER_DATA_SRC_MASK) >> ISA207_SIER_DATA_SRC_SHIFT;

View File

@ -153,5 +153,6 @@ MRPROPER_FILES += $(HOST_DIR)/include/generated
archclean:
@find . \( -name '*.bb' -o -name '*.bbg' -o -name '*.da' \
-o -name '*.gcov' \) -type f -print | xargs rm -f
$(Q)$(MAKE) -f $(srctree)/Makefile ARCH=$(HEADER_ARCH) clean
export HEADER_ARCH SUBARCH USER_CFLAGS CFLAGS_NO_HARDENING OS DEV_NULL_PATH

View File

@ -49,6 +49,7 @@ void __init mem_init(void)
map_memory(brk_end, __pa(brk_end), uml_reserved - brk_end, 1, 1, 0);
memblock_free(__pa(brk_end), uml_reserved - brk_end);
uml_reserved = brk_end;
min_low_pfn = PFN_UP(__pa(uml_reserved));
/* this will put all low memory onto the freelists */
memblock_free_all();

View File

@ -22,6 +22,7 @@
# This script requires:
# bash
# syslinux
# genisoimage
# mtools (for fdimage* and hdimage)
# edk2/OVMF (for hdimage)
#
@ -250,7 +251,9 @@ geniso() {
cp "$isolinux" "$ldlinux" "$tmp_dir"
cp "$FBZIMAGE" "$tmp_dir"/linux
echo default linux "$KCMDLINE" > "$tmp_dir"/isolinux.cfg
if [ ${#FDINITRDS[@]} -gt 0 ]; then
cp "${FDINITRDS[@]}" "$tmp_dir"/
fi
genisoimage -J -r -appid 'LINUX_BOOT' -input-charset=utf-8 \
-quiet -o "$FIMAGE" -b isolinux.bin \
-c boot.cat -no-emul-boot -boot-load-size 4 \

View File

@ -803,7 +803,8 @@ static __init int perf_event_ibs_init(void)
if (ibs_caps & IBS_CAPS_OPCNTEXT) {
perf_ibs_op.max_period |= IBS_OP_MAX_CNT_EXT_MASK;
perf_ibs_op.config_mask |= IBS_OP_MAX_CNT_EXT_MASK;
perf_ibs_op.cnt_mask |= IBS_OP_MAX_CNT_EXT_MASK;
perf_ibs_op.cnt_mask |= (IBS_OP_MAX_CNT_EXT_MASK |
IBS_OP_CUR_CNT_EXT_MASK);
}
ret = perf_ibs_pmu_init(&perf_ibs_op, "ibs_op");

View File

@ -98,7 +98,7 @@ static inline u8 *its_static_thunk(int reg)
}
#endif
#ifdef CONFIG_RETHUNK
#if defined(CONFIG_RETHUNK) && defined(CONFIG_STACK_VALIDATION)
extern bool cpu_wants_rethunk(void);
extern bool cpu_wants_rethunk_at(void *addr);
#else

View File

@ -58,6 +58,8 @@ int __register_nmi_handler(unsigned int, struct nmiaction *);
void unregister_nmi_handler(unsigned int, const char *);
void set_emergency_nmi_handler(unsigned int type, nmi_handler_t handler);
void stop_nmi(void);
void restart_nmi(void);
void local_touch_nmi(void);

View File

@ -417,6 +417,7 @@ struct pebs_xmm {
*/
#define IBS_OP_CUR_CNT (0xFFF80ULL<<32)
#define IBS_OP_CUR_CNT_RAND (0x0007FULL<<32)
#define IBS_OP_CUR_CNT_EXT_MASK (0x7FULL<<52)
#define IBS_OP_CNT_CTL (1ULL<<19)
#define IBS_OP_VAL (1ULL<<18)
#define IBS_OP_ENABLE (1ULL<<17)

View File

@ -1382,9 +1382,13 @@ static __ro_after_init enum spectre_v2_mitigation_cmd spectre_v2_cmd;
static enum spectre_v2_user_cmd __init
spectre_v2_parse_user_cmdline(void)
{
enum spectre_v2_user_cmd mode;
char arg[20];
int ret, i;
mode = IS_ENABLED(CONFIG_MITIGATION_SPECTRE_V2) ?
SPECTRE_V2_USER_CMD_AUTO : SPECTRE_V2_USER_CMD_NONE;
switch (spectre_v2_cmd) {
case SPECTRE_V2_CMD_NONE:
return SPECTRE_V2_USER_CMD_NONE;
@ -1397,7 +1401,7 @@ spectre_v2_parse_user_cmdline(void)
ret = cmdline_find_option(boot_command_line, "spectre_v2_user",
arg, sizeof(arg));
if (ret < 0)
return SPECTRE_V2_USER_CMD_AUTO;
return mode;
for (i = 0; i < ARRAY_SIZE(v2_user_options); i++) {
if (match_option(arg, ret, v2_user_options[i].option)) {
@ -1407,8 +1411,8 @@ spectre_v2_parse_user_cmdline(void)
}
}
pr_err("Unknown user space protection option (%s). Switching to AUTO select\n", arg);
return SPECTRE_V2_USER_CMD_AUTO;
pr_err("Unknown user space protection option (%s). Switching to default\n", arg);
return mode;
}
static inline bool spectre_v2_in_eibrs_mode(enum spectre_v2_mitigation mode)

View File

@ -38,8 +38,12 @@
#define CREATE_TRACE_POINTS
#include <trace/events/nmi.h>
/*
* An emergency handler can be set in any context including NMI
*/
struct nmi_desc {
raw_spinlock_t lock;
nmi_handler_t emerg_handler;
struct list_head head;
};
@ -121,9 +125,22 @@ static void nmi_check_duration(struct nmiaction *action, u64 duration)
static int nmi_handle(unsigned int type, struct pt_regs *regs)
{
struct nmi_desc *desc = nmi_to_desc(type);
nmi_handler_t ehandler;
struct nmiaction *a;
int handled=0;
/*
* Call the emergency handler, if set
*
* In the case of crash_nmi_callback() emergency handler, it will
* return in the case of the crashing CPU to enable it to complete
* other necessary crashing actions ASAP. Other handlers in the
* linked list won't need to be run.
*/
ehandler = desc->emerg_handler;
if (ehandler)
return ehandler(type, regs);
rcu_read_lock();
/*
@ -209,6 +226,31 @@ void unregister_nmi_handler(unsigned int type, const char *name)
}
EXPORT_SYMBOL_GPL(unregister_nmi_handler);
/**
* set_emergency_nmi_handler - Set emergency handler
* @type: NMI type
* @handler: the emergency handler to be stored
*
* Set an emergency NMI handler which, if set, will preempt all the other
* handlers in the linked list. If a NULL handler is passed in, it will clear
* it. It is expected that concurrent calls to this function will not happen
* or the system is screwed beyond repair.
*/
void set_emergency_nmi_handler(unsigned int type, nmi_handler_t handler)
{
struct nmi_desc *desc = nmi_to_desc(type);
if (WARN_ON_ONCE(desc->emerg_handler == handler))
return;
desc->emerg_handler = handler;
/*
* Ensure the emergency handler is visible to other CPUs before
* function return
*/
smp_wmb();
}
static void
pci_serr_error(unsigned char reason, struct pt_regs *regs)
{

View File

@ -874,15 +874,11 @@ void nmi_shootdown_cpus(nmi_shootdown_cb callback)
shootdown_callback = callback;
atomic_set(&waiting_for_crash_ipi, num_online_cpus() - 1);
/* Would it be better to replace the trap vector here? */
if (register_nmi_handler(NMI_LOCAL, crash_nmi_callback,
NMI_FLAG_FIRST, "crash"))
return; /* Return what? */
/*
* Ensure the new callback function is set before sending
* out the NMI
* Set emergency handler to preempt other handlers.
*/
wmb();
set_emergency_nmi_handler(NMI_LOCAL, crash_nmi_callback);
apic_send_IPI_allbutself(NMI_VECTOR);

View File

@ -96,8 +96,14 @@ void __init kernel_randomize_memory(void)
memory_tb = DIV_ROUND_UP(max_pfn << PAGE_SHIFT, 1UL << TB_SHIFT) +
CONFIG_RANDOMIZE_MEMORY_PHYSICAL_PADDING;
/* Adapt physical memory region size based on available memory */
if (memory_tb < kaslr_regions[0].size_tb)
/*
* Adapt physical memory region size based on available memory,
* except when CONFIG_PCI_P2PDMA is enabled. P2PDMA exposes the
* device BAR space assuming the direct map space is large enough
* for creating a ZONE_DEVICE mapping in the direct map corresponding
* to the physical BAR address.
*/
if (!IS_ENABLED(CONFIG_PCI_P2PDMA) && (memory_tb < kaslr_regions[0].size_tb))
kaslr_regions[0].size_tb = memory_tb;
/*

View File

@ -26,7 +26,6 @@ void get_regs_from_mc(struct uml_pt_regs *regs, mcontext_t *mc)
COPY(RIP);
COPY2(EFLAGS, EFL);
COPY2(CS, CSGSFS);
regs->gp[CS / sizeof(unsigned long)] &= 0xffff;
regs->gp[CS / sizeof(unsigned long)] |= 3;
regs->gp[SS / sizeof(unsigned long)] = mc->gregs[REG_CSGSFS] >> 48;
#endif
}

View File

@ -262,10 +262,6 @@ static int hash_accept(struct socket *sock, struct socket *newsock, int flags,
return err;
err = crypto_ahash_import(&ctx2->req, state);
if (err) {
sock_orphan(sk2);
sock_put(sk2);
}
return err;
}

View File

@ -55,7 +55,7 @@ static int __lzorle_compress(const u8 *src, unsigned int slen,
size_t tmp_len = *dlen; /* size_t(ulong) <-> uint on 64 bit */
int err;
err = lzorle1x_1_compress(src, slen, dst, &tmp_len, ctx);
err = lzorle1x_1_compress_safe(src, slen, dst, &tmp_len, ctx);
if (err != LZO_E_OK)
return -EINVAL;

View File

@ -55,7 +55,7 @@ static int __lzo_compress(const u8 *src, unsigned int slen,
size_t tmp_len = *dlen; /* size_t(ulong) <-> uint on 64 bit */
int err;
err = lzo1x_1_compress(src, slen, dst, &tmp_len, ctx);
err = lzo1x_1_compress_safe(src, slen, dst, &tmp_len, ctx);
if (err != LZO_E_OK)
return -EINVAL;

View File

@ -437,7 +437,7 @@ config ACPI_SBS
the modules will be called sbs and sbshc.
config ACPI_HED
tristate "Hardware Error Device"
bool "Hardware Error Device"
help
This driver supports the Hardware Error Device (PNP0C33),
which is used to report some hardware errors notified via

View File

@ -72,7 +72,12 @@ static struct acpi_driver acpi_hed_driver = {
.notify = acpi_hed_notify,
},
};
module_acpi_driver(acpi_hed_driver);
static int __init acpi_hed_driver_init(void)
{
return acpi_bus_register_driver(&acpi_hed_driver);
}
subsys_initcall(acpi_hed_driver_init);
MODULE_AUTHOR("Huang Ying");
MODULE_DESCRIPTION("ACPI Hardware Error Device Driver");

View File

@ -594,18 +594,19 @@ static int charlcd_init(struct charlcd *lcd)
return 0;
}
struct charlcd *charlcd_alloc(void)
struct charlcd *charlcd_alloc(unsigned int drvdata_size)
{
struct charlcd_priv *priv;
struct charlcd *lcd;
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
priv = kzalloc(sizeof(*priv) + drvdata_size, GFP_KERNEL);
if (!priv)
return NULL;
priv->esc_seq.len = -1;
lcd = &priv->lcd;
lcd->drvdata = priv->drvdata;
return lcd;
}

View File

@ -49,7 +49,7 @@ struct charlcd {
unsigned long y;
} addr;
void *drvdata;
void *drvdata; /* Set by charlcd_alloc() */
};
/**
@ -93,7 +93,8 @@ struct charlcd_ops {
};
void charlcd_backlight(struct charlcd *lcd, enum charlcd_onoff on);
struct charlcd *charlcd_alloc(void);
struct charlcd *charlcd_alloc(unsigned int drvdata_size);
void charlcd_free(struct charlcd *lcd);
int charlcd_register(struct charlcd *lcd);

View File

@ -226,7 +226,7 @@ static int hd44780_probe(struct platform_device *pdev)
if (!hdc)
return -ENOMEM;
lcd = charlcd_alloc();
lcd = charlcd_alloc(0);
if (!lcd)
goto fail1;

View File

@ -307,7 +307,7 @@ static int lcd2s_i2c_probe(struct i2c_client *i2c,
if (err < 0)
return err;
lcd = charlcd_alloc();
lcd = charlcd_alloc(0);
if (!lcd)
return -ENOMEM;

View File

@ -835,7 +835,7 @@ static void lcd_init(void)
if (!hdc)
return;
charlcd = charlcd_alloc();
charlcd = charlcd_alloc(0);
if (!charlcd) {
kfree(hdc);
return;

View File

@ -53,7 +53,7 @@ enum tis_int_flags {
enum tis_defaults {
TIS_MEM_LEN = 0x5000,
TIS_SHORT_TIMEOUT = 750, /* ms */
TIS_LONG_TIMEOUT = 2000, /* 2 sec */
TIS_LONG_TIMEOUT = 4000, /* 4 secs */
TIS_TIMEOUT_MIN_ATML = 14700, /* usecs */
TIS_TIMEOUT_MAX_ATML = 15000, /* usecs */
};

View File

@ -8,6 +8,7 @@
#include <linux/err.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/units.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
@ -405,11 +406,151 @@ static const char * const imx8mp_clkout_sels[] = {"audio_pll1_out", "audio_pll2_
static struct clk_hw **hws;
static struct clk_hw_onecell_data *clk_hw_data;
struct imx8mp_clock_constraints {
unsigned int clkid;
u32 maxrate;
};
/*
* Below tables are taken from IMX8MPCEC Rev. 2.1, 07/2023
* Table 13. Maximum frequency of modules.
* Probable typos fixed are marked with a comment.
*/
static const struct imx8mp_clock_constraints imx8mp_clock_common_constraints[] = {
{ IMX8MP_CLK_A53_DIV, 1000 * HZ_PER_MHZ },
{ IMX8MP_CLK_ENET_AXI, 266666667 }, /* Datasheet claims 266MHz */
{ IMX8MP_CLK_NAND_USDHC_BUS, 266666667 }, /* Datasheet claims 266MHz */
{ IMX8MP_CLK_MEDIA_APB, 200 * HZ_PER_MHZ },
{ IMX8MP_CLK_HDMI_APB, 133333333 }, /* Datasheet claims 133MHz */
{ IMX8MP_CLK_ML_AXI, 800 * HZ_PER_MHZ },
{ IMX8MP_CLK_AHB, 133333333 },
{ IMX8MP_CLK_IPG_ROOT, 66666667 },
{ IMX8MP_CLK_AUDIO_AHB, 400 * HZ_PER_MHZ },
{ IMX8MP_CLK_MEDIA_DISP2_PIX, 170 * HZ_PER_MHZ },
{ IMX8MP_CLK_DRAM_ALT, 666666667 },
{ IMX8MP_CLK_DRAM_APB, 200 * HZ_PER_MHZ },
{ IMX8MP_CLK_CAN1, 80 * HZ_PER_MHZ },
{ IMX8MP_CLK_CAN2, 80 * HZ_PER_MHZ },
{ IMX8MP_CLK_PCIE_AUX, 10 * HZ_PER_MHZ },
{ IMX8MP_CLK_I2C5, 66666667 }, /* Datasheet claims 66MHz */
{ IMX8MP_CLK_I2C6, 66666667 }, /* Datasheet claims 66MHz */
{ IMX8MP_CLK_SAI1, 66666667 }, /* Datasheet claims 66MHz */
{ IMX8MP_CLK_SAI2, 66666667 }, /* Datasheet claims 66MHz */
{ IMX8MP_CLK_SAI3, 66666667 }, /* Datasheet claims 66MHz */
{ IMX8MP_CLK_SAI5, 66666667 }, /* Datasheet claims 66MHz */
{ IMX8MP_CLK_SAI6, 66666667 }, /* Datasheet claims 66MHz */
{ IMX8MP_CLK_ENET_QOS, 125 * HZ_PER_MHZ },
{ IMX8MP_CLK_ENET_QOS_TIMER, 200 * HZ_PER_MHZ },
{ IMX8MP_CLK_ENET_REF, 125 * HZ_PER_MHZ },
{ IMX8MP_CLK_ENET_TIMER, 125 * HZ_PER_MHZ },
{ IMX8MP_CLK_ENET_PHY_REF, 125 * HZ_PER_MHZ },
{ IMX8MP_CLK_NAND, 500 * HZ_PER_MHZ },
{ IMX8MP_CLK_QSPI, 400 * HZ_PER_MHZ },
{ IMX8MP_CLK_USDHC1, 400 * HZ_PER_MHZ },
{ IMX8MP_CLK_USDHC2, 400 * HZ_PER_MHZ },
{ IMX8MP_CLK_I2C1, 66666667 }, /* Datasheet claims 66MHz */
{ IMX8MP_CLK_I2C2, 66666667 }, /* Datasheet claims 66MHz */
{ IMX8MP_CLK_I2C3, 66666667 }, /* Datasheet claims 66MHz */
{ IMX8MP_CLK_I2C4, 66666667 }, /* Datasheet claims 66MHz */
{ IMX8MP_CLK_UART1, 80 * HZ_PER_MHZ },
{ IMX8MP_CLK_UART2, 80 * HZ_PER_MHZ },
{ IMX8MP_CLK_UART3, 80 * HZ_PER_MHZ },
{ IMX8MP_CLK_UART4, 80 * HZ_PER_MHZ },
{ IMX8MP_CLK_ECSPI1, 80 * HZ_PER_MHZ },
{ IMX8MP_CLK_ECSPI2, 80 * HZ_PER_MHZ },
{ IMX8MP_CLK_PWM1, 66666667 }, /* Datasheet claims 66MHz */
{ IMX8MP_CLK_PWM2, 66666667 }, /* Datasheet claims 66MHz */
{ IMX8MP_CLK_PWM3, 66666667 }, /* Datasheet claims 66MHz */
{ IMX8MP_CLK_PWM4, 66666667 }, /* Datasheet claims 66MHz */
{ IMX8MP_CLK_GPT1, 100 * HZ_PER_MHZ },
{ IMX8MP_CLK_GPT2, 100 * HZ_PER_MHZ },
{ IMX8MP_CLK_GPT3, 100 * HZ_PER_MHZ },
{ IMX8MP_CLK_GPT4, 100 * HZ_PER_MHZ },
{ IMX8MP_CLK_GPT5, 100 * HZ_PER_MHZ },
{ IMX8MP_CLK_GPT6, 100 * HZ_PER_MHZ },
{ IMX8MP_CLK_WDOG, 66666667 }, /* Datasheet claims 66MHz */
{ IMX8MP_CLK_IPP_DO_CLKO1, 200 * HZ_PER_MHZ },
{ IMX8MP_CLK_IPP_DO_CLKO2, 200 * HZ_PER_MHZ },
{ IMX8MP_CLK_HDMI_REF_266M, 266 * HZ_PER_MHZ },
{ IMX8MP_CLK_USDHC3, 400 * HZ_PER_MHZ },
{ IMX8MP_CLK_MEDIA_MIPI_PHY1_REF, 300 * HZ_PER_MHZ },
{ IMX8MP_CLK_MEDIA_DISP1_PIX, 250 * HZ_PER_MHZ },
{ IMX8MP_CLK_MEDIA_CAM2_PIX, 277 * HZ_PER_MHZ },
{ IMX8MP_CLK_MEDIA_LDB, 595 * HZ_PER_MHZ },
{ IMX8MP_CLK_MEDIA_MIPI_TEST_BYTE, 200 * HZ_PER_MHZ },
{ IMX8MP_CLK_ECSPI3, 80 * HZ_PER_MHZ },
{ IMX8MP_CLK_PDM, 200 * HZ_PER_MHZ },
{ IMX8MP_CLK_SAI7, 66666667 }, /* Datasheet claims 66MHz */
{ IMX8MP_CLK_MAIN_AXI, 400 * HZ_PER_MHZ },
{ /* Sentinel */ }
};
static const struct imx8mp_clock_constraints imx8mp_clock_nominal_constraints[] = {
{ IMX8MP_CLK_M7_CORE, 600 * HZ_PER_MHZ },
{ IMX8MP_CLK_ML_CORE, 800 * HZ_PER_MHZ },
{ IMX8MP_CLK_GPU3D_CORE, 800 * HZ_PER_MHZ },
{ IMX8MP_CLK_GPU3D_SHADER_CORE, 800 * HZ_PER_MHZ },
{ IMX8MP_CLK_GPU2D_CORE, 800 * HZ_PER_MHZ },
{ IMX8MP_CLK_AUDIO_AXI_SRC, 600 * HZ_PER_MHZ },
{ IMX8MP_CLK_HSIO_AXI, 400 * HZ_PER_MHZ },
{ IMX8MP_CLK_MEDIA_ISP, 400 * HZ_PER_MHZ },
{ IMX8MP_CLK_VPU_BUS, 600 * HZ_PER_MHZ },
{ IMX8MP_CLK_MEDIA_AXI, 400 * HZ_PER_MHZ },
{ IMX8MP_CLK_HDMI_AXI, 400 * HZ_PER_MHZ },
{ IMX8MP_CLK_GPU_AXI, 600 * HZ_PER_MHZ },
{ IMX8MP_CLK_GPU_AHB, 300 * HZ_PER_MHZ },
{ IMX8MP_CLK_NOC, 800 * HZ_PER_MHZ },
{ IMX8MP_CLK_NOC_IO, 600 * HZ_PER_MHZ },
{ IMX8MP_CLK_ML_AHB, 300 * HZ_PER_MHZ },
{ IMX8MP_CLK_VPU_G1, 600 * HZ_PER_MHZ },
{ IMX8MP_CLK_VPU_G2, 500 * HZ_PER_MHZ },
{ IMX8MP_CLK_MEDIA_CAM1_PIX, 400 * HZ_PER_MHZ },
{ IMX8MP_CLK_VPU_VC8000E, 400 * HZ_PER_MHZ }, /* Datasheet claims 500MHz */
{ IMX8MP_CLK_DRAM_CORE, 800 * HZ_PER_MHZ },
{ IMX8MP_CLK_GIC, 400 * HZ_PER_MHZ },
{ /* Sentinel */ }
};
static const struct imx8mp_clock_constraints imx8mp_clock_overdrive_constraints[] = {
{ IMX8MP_CLK_M7_CORE, 800 * HZ_PER_MHZ},
{ IMX8MP_CLK_ML_CORE, 1000 * HZ_PER_MHZ },
{ IMX8MP_CLK_GPU3D_CORE, 1000 * HZ_PER_MHZ },
{ IMX8MP_CLK_GPU3D_SHADER_CORE, 1000 * HZ_PER_MHZ },
{ IMX8MP_CLK_GPU2D_CORE, 1000 * HZ_PER_MHZ },
{ IMX8MP_CLK_AUDIO_AXI_SRC, 800 * HZ_PER_MHZ },
{ IMX8MP_CLK_HSIO_AXI, 500 * HZ_PER_MHZ },
{ IMX8MP_CLK_MEDIA_ISP, 500 * HZ_PER_MHZ },
{ IMX8MP_CLK_VPU_BUS, 800 * HZ_PER_MHZ },
{ IMX8MP_CLK_MEDIA_AXI, 500 * HZ_PER_MHZ },
{ IMX8MP_CLK_HDMI_AXI, 500 * HZ_PER_MHZ },
{ IMX8MP_CLK_GPU_AXI, 800 * HZ_PER_MHZ },
{ IMX8MP_CLK_GPU_AHB, 400 * HZ_PER_MHZ },
{ IMX8MP_CLK_NOC, 1000 * HZ_PER_MHZ },
{ IMX8MP_CLK_NOC_IO, 800 * HZ_PER_MHZ },
{ IMX8MP_CLK_ML_AHB, 400 * HZ_PER_MHZ },
{ IMX8MP_CLK_VPU_G1, 800 * HZ_PER_MHZ },
{ IMX8MP_CLK_VPU_G2, 700 * HZ_PER_MHZ },
{ IMX8MP_CLK_MEDIA_CAM1_PIX, 500 * HZ_PER_MHZ },
{ IMX8MP_CLK_VPU_VC8000E, 500 * HZ_PER_MHZ }, /* Datasheet claims 400MHz */
{ IMX8MP_CLK_DRAM_CORE, 1000 * HZ_PER_MHZ },
{ IMX8MP_CLK_GIC, 500 * HZ_PER_MHZ },
{ /* Sentinel */ }
};
static void imx8mp_clocks_apply_constraints(const struct imx8mp_clock_constraints constraints[])
{
const struct imx8mp_clock_constraints *constr;
for (constr = constraints; constr->clkid; constr++)
clk_hw_set_rate_range(hws[constr->clkid], 0, constr->maxrate);
}
static int imx8mp_clocks_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *np;
void __iomem *anatop_base, *ccm_base;
const char *opmode;
int err;
np = of_find_compatible_node(NULL, NULL, "fsl,imx8mp-anatop");
@ -720,6 +861,16 @@ static int imx8mp_clocks_probe(struct platform_device *pdev)
imx_check_clk_hws(hws, IMX8MP_CLK_END);
imx8mp_clocks_apply_constraints(imx8mp_clock_common_constraints);
err = of_property_read_string(np, "fsl,operating-mode", &opmode);
if (!err) {
if (!strcmp(opmode, "nominal"))
imx8mp_clocks_apply_constraints(imx8mp_clock_nominal_constraints);
else if (!strcmp(opmode, "overdrive"))
imx8mp_clocks_apply_constraints(imx8mp_clock_overdrive_constraints);
}
err = of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_hw_data);
if (err < 0) {
dev_err(dev, "failed to register hws for i.MX8MP\n");

View File

@ -411,7 +411,7 @@ static struct clk_rcg2 cam_cc_bps_clk_src = {
.parent_data = cam_cc_parent_data_0,
.num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_ops,
.ops = &clk_rcg2_shared_ops,
},
};
@ -433,7 +433,7 @@ static struct clk_rcg2 cam_cc_camnoc_axi_clk_src = {
.parent_data = cam_cc_parent_data_0,
.num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_ops,
.ops = &clk_rcg2_shared_ops,
},
};
@ -454,7 +454,7 @@ static struct clk_rcg2 cam_cc_cci_0_clk_src = {
.parent_data = cam_cc_parent_data_0,
.num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_ops,
.ops = &clk_rcg2_shared_ops,
},
};
@ -469,7 +469,7 @@ static struct clk_rcg2 cam_cc_cci_1_clk_src = {
.parent_data = cam_cc_parent_data_0,
.num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_ops,
.ops = &clk_rcg2_shared_ops,
},
};
@ -490,7 +490,7 @@ static struct clk_rcg2 cam_cc_cphy_rx_clk_src = {
.parent_data = cam_cc_parent_data_0,
.num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_ops,
.ops = &clk_rcg2_shared_ops,
},
};
@ -511,7 +511,7 @@ static struct clk_rcg2 cam_cc_csi0phytimer_clk_src = {
.parent_data = cam_cc_parent_data_0,
.num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_ops,
.ops = &clk_rcg2_shared_ops,
},
};
@ -526,7 +526,7 @@ static struct clk_rcg2 cam_cc_csi1phytimer_clk_src = {
.parent_data = cam_cc_parent_data_0,
.num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_ops,
.ops = &clk_rcg2_shared_ops,
},
};
@ -556,7 +556,7 @@ static struct clk_rcg2 cam_cc_csi3phytimer_clk_src = {
.parent_data = cam_cc_parent_data_0,
.num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_ops,
.ops = &clk_rcg2_shared_ops,
},
};
@ -571,7 +571,7 @@ static struct clk_rcg2 cam_cc_csi4phytimer_clk_src = {
.parent_data = cam_cc_parent_data_0,
.num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_ops,
.ops = &clk_rcg2_shared_ops,
},
};
@ -586,7 +586,7 @@ static struct clk_rcg2 cam_cc_csi5phytimer_clk_src = {
.parent_data = cam_cc_parent_data_0,
.num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_ops,
.ops = &clk_rcg2_shared_ops,
},
};
@ -611,7 +611,7 @@ static struct clk_rcg2 cam_cc_fast_ahb_clk_src = {
.parent_data = cam_cc_parent_data_0,
.num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_ops,
.ops = &clk_rcg2_shared_ops,
},
};
@ -634,7 +634,7 @@ static struct clk_rcg2 cam_cc_fd_core_clk_src = {
.parent_data = cam_cc_parent_data_0,
.num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_ops,
.ops = &clk_rcg2_shared_ops,
},
};
@ -649,7 +649,7 @@ static struct clk_rcg2 cam_cc_icp_clk_src = {
.parent_data = cam_cc_parent_data_0,
.num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_ops,
.ops = &clk_rcg2_shared_ops,
},
};
@ -673,7 +673,7 @@ static struct clk_rcg2 cam_cc_ife_0_clk_src = {
.parent_data = cam_cc_parent_data_2,
.num_parents = ARRAY_SIZE(cam_cc_parent_data_2),
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_ops,
.ops = &clk_rcg2_shared_ops,
},
};
@ -710,7 +710,7 @@ static struct clk_rcg2 cam_cc_ife_0_csid_clk_src = {
.parent_data = cam_cc_parent_data_0,
.num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_ops,
.ops = &clk_rcg2_shared_ops,
},
};
@ -734,7 +734,7 @@ static struct clk_rcg2 cam_cc_ife_1_clk_src = {
.parent_data = cam_cc_parent_data_3,
.num_parents = ARRAY_SIZE(cam_cc_parent_data_3),
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_ops,
.ops = &clk_rcg2_shared_ops,
},
};
@ -749,7 +749,7 @@ static struct clk_rcg2 cam_cc_ife_1_csid_clk_src = {
.parent_data = cam_cc_parent_data_0,
.num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_ops,
.ops = &clk_rcg2_shared_ops,
},
};
@ -771,7 +771,7 @@ static struct clk_rcg2 cam_cc_ife_lite_clk_src = {
.parent_data = cam_cc_parent_data_0,
.num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_ops,
.ops = &clk_rcg2_shared_ops,
},
};
@ -786,7 +786,7 @@ static struct clk_rcg2 cam_cc_ife_lite_csid_clk_src = {
.parent_data = cam_cc_parent_data_0,
.num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_ops,
.ops = &clk_rcg2_shared_ops,
},
};
@ -810,7 +810,7 @@ static struct clk_rcg2 cam_cc_ipe_0_clk_src = {
.parent_data = cam_cc_parent_data_4,
.num_parents = ARRAY_SIZE(cam_cc_parent_data_4),
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_ops,
.ops = &clk_rcg2_shared_ops,
},
};
@ -825,7 +825,7 @@ static struct clk_rcg2 cam_cc_jpeg_clk_src = {
.parent_data = cam_cc_parent_data_0,
.num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_ops,
.ops = &clk_rcg2_shared_ops,
},
};
@ -847,7 +847,7 @@ static struct clk_rcg2 cam_cc_mclk0_clk_src = {
.parent_data = cam_cc_parent_data_1,
.num_parents = ARRAY_SIZE(cam_cc_parent_data_1),
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_ops,
.ops = &clk_rcg2_shared_ops,
},
};
@ -862,7 +862,7 @@ static struct clk_rcg2 cam_cc_mclk1_clk_src = {
.parent_data = cam_cc_parent_data_1,
.num_parents = ARRAY_SIZE(cam_cc_parent_data_1),
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_ops,
.ops = &clk_rcg2_shared_ops,
},
};
@ -877,7 +877,7 @@ static struct clk_rcg2 cam_cc_mclk2_clk_src = {
.parent_data = cam_cc_parent_data_1,
.num_parents = ARRAY_SIZE(cam_cc_parent_data_1),
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_ops,
.ops = &clk_rcg2_shared_ops,
},
};
@ -892,7 +892,7 @@ static struct clk_rcg2 cam_cc_mclk3_clk_src = {
.parent_data = cam_cc_parent_data_1,
.num_parents = ARRAY_SIZE(cam_cc_parent_data_1),
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_ops,
.ops = &clk_rcg2_shared_ops,
},
};
@ -907,7 +907,7 @@ static struct clk_rcg2 cam_cc_mclk4_clk_src = {
.parent_data = cam_cc_parent_data_1,
.num_parents = ARRAY_SIZE(cam_cc_parent_data_1),
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_ops,
.ops = &clk_rcg2_shared_ops,
},
};
@ -922,7 +922,7 @@ static struct clk_rcg2 cam_cc_mclk5_clk_src = {
.parent_data = cam_cc_parent_data_1,
.num_parents = ARRAY_SIZE(cam_cc_parent_data_1),
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_ops,
.ops = &clk_rcg2_shared_ops,
},
};
@ -993,7 +993,7 @@ static struct clk_rcg2 cam_cc_slow_ahb_clk_src = {
.parent_data = cam_cc_parent_data_0,
.num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_ops,
.ops = &clk_rcg2_shared_ops,
},
};

View File

@ -119,6 +119,9 @@ static void gic_update_frequency(void *data)
static int gic_starting_cpu(unsigned int cpu)
{
/* Ensure the GIC counter is running */
clear_gic_config(GIC_CONFIG_COUNTSTOP);
gic_clockevent_cpu_init(cpu, this_cpu_ptr(&gic_clockevent_device));
return 0;
}
@ -253,9 +256,6 @@ static int __init gic_clocksource_of_init(struct device_node *node)
pr_warn("Unable to register clock notifier\n");
}
/* And finally start the counter */
clear_gic_config(GIC_CONFIG_COUNTSTOP);
/*
* It's safe to use the MIPS GIC timer as a sched clock source only if
* its ticks are stable, which is true on either the platforms with

View File

@ -73,11 +73,18 @@ static int tegra186_cpufreq_init(struct cpufreq_policy *policy)
{
struct tegra186_cpufreq_data *data = cpufreq_get_driver_data();
unsigned int cluster = data->cpus[policy->cpu].bpmp_cluster_id;
u32 cpu;
policy->freq_table = data->clusters[cluster].table;
policy->cpuinfo.transition_latency = 300 * 1000;
policy->driver_data = NULL;
/* set same policy for all cpus in a cluster */
for (cpu = 0; cpu < ARRAY_SIZE(tegra186_cpus); cpu++) {
if (data->cpus[cpu].bpmp_cluster_id == cluster)
cpumask_set_cpu(cpu, policy->cpus);
}
return 0;
}

View File

@ -249,8 +249,19 @@ again:
* This can deal with workloads that have long pauses interspersed
* with sporadic activity with a bunch of short pauses.
*/
if ((divisor * 4) <= INTERVALS * 3)
if (divisor * 4 <= INTERVALS * 3) {
/*
* If there are sufficiently many data points still under
* consideration after the outliers have been eliminated,
* returning without a prediction would be a mistake because it
* is likely that the next interval will not exceed the current
* maximum, so return the latter in that case.
*/
if (divisor >= INTERVALS / 2)
return max;
return UINT_MAX;
}
thresh = max - 1;
goto again;

View File

@ -410,9 +410,10 @@ static int cpt_process_ccode(struct otx2_cptlfs_info *lfs,
break;
}
dev_err(&pdev->dev,
"Request failed with software error code 0x%x\n",
cpt_status->s.uc_compcode);
pr_debug("Request failed with software error code 0x%x: algo = %s driver = %s\n",
cpt_status->s.uc_compcode,
info->req->areq->tfm->__crt_alg->cra_name,
info->req->areq->tfm->__crt_alg->cra_driver_name);
otx2_cpt_dump_sg_list(pdev, info->req);
break;
}

View File

@ -397,10 +397,9 @@ static int ie31200_probe1(struct pci_dev *pdev, int dev_idx)
int i, j, ret;
struct mem_ctl_info *mci = NULL;
struct edac_mc_layer layers[2];
struct dimm_data dimm_info[IE31200_CHANNELS][IE31200_DIMMS_PER_CHANNEL];
void __iomem *window;
struct ie31200_priv *priv;
u32 addr_decode, mad_offset;
u32 addr_decode[IE31200_CHANNELS], mad_offset;
/*
* Kaby Lake, Coffee Lake seem to work like Skylake. Please re-visit
@ -458,19 +457,10 @@ static int ie31200_probe1(struct pci_dev *pdev, int dev_idx)
mad_offset = IE31200_MAD_DIMM_0_OFFSET;
}
/* populate DIMM info */
for (i = 0; i < IE31200_CHANNELS; i++) {
addr_decode = readl(window + mad_offset +
addr_decode[i] = readl(window + mad_offset +
(i * 4));
edac_dbg(0, "addr_decode: 0x%x\n", addr_decode);
for (j = 0; j < IE31200_DIMMS_PER_CHANNEL; j++) {
populate_dimm_info(&dimm_info[i][j], addr_decode, j,
skl);
edac_dbg(0, "size: 0x%x, rank: %d, width: %d\n",
dimm_info[i][j].size,
dimm_info[i][j].dual_rank,
dimm_info[i][j].x16_width);
}
edac_dbg(0, "addr_decode: 0x%x\n", addr_decode[i]);
}
/*
@ -481,14 +471,22 @@ static int ie31200_probe1(struct pci_dev *pdev, int dev_idx)
*/
for (i = 0; i < IE31200_DIMMS_PER_CHANNEL; i++) {
for (j = 0; j < IE31200_CHANNELS; j++) {
struct dimm_data dimm_info;
struct dimm_info *dimm;
unsigned long nr_pages;
nr_pages = IE31200_PAGES(dimm_info[j][i].size, skl);
populate_dimm_info(&dimm_info, addr_decode[j], i,
skl);
edac_dbg(0, "size: 0x%x, rank: %d, width: %d\n",
dimm_info.size,
dimm_info.dual_rank,
dimm_info.x16_width);
nr_pages = IE31200_PAGES(dimm_info.size, skl);
if (nr_pages == 0)
continue;
if (dimm_info[j][i].dual_rank) {
if (dimm_info.dual_rank) {
nr_pages = nr_pages / 2;
dimm = edac_get_dimm(mci, (i * 2) + 1, j, 0);
dimm->nr_pages = nr_pages;

View File

@ -190,6 +190,7 @@ struct ffa_device *ffa_device_register(const uuid_t *uuid, int vm_id)
dev = &ffa_dev->dev;
dev->bus = &ffa_bus_type;
dev->release = ffa_release_device;
dev->dma_mask = &dev->coherent_dma_mask;
dev_set_name(&ffa_dev->dev, "arm-ffa-%d", id);
ffa_dev->id = id;

View File

@ -52,7 +52,7 @@
/* V2 Defines */
#define VSE_CVP_TX_CREDITS 0x49 /* 8bit */
#define V2_CREDIT_TIMEOUT_US 20000
#define V2_CREDIT_TIMEOUT_US 40000
#define V2_CHECK_CREDIT_US 10
#define V2_POLL_TIMEOUT_US 1000000
#define V2_USER_TIMEOUT_US 500000

View File

@ -353,7 +353,6 @@ static int psp_sw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct psp_context *psp = &adev->psp;
struct psp_gfx_cmd_resp *cmd = psp->cmd;
psp_memory_training_fini(psp);
if (psp->sos_fw) {
@ -373,8 +372,8 @@ static int psp_sw_fini(void *handle)
adev->asic_type == CHIP_SIENNA_CICHLID)
psp_sysfs_fini(adev);
kfree(cmd);
cmd = NULL;
kfree(psp->cmd);
psp->cmd = NULL;
amdgpu_bo_free_kernel(&psp->fw_pri_bo,
&psp->fw_pri_mc_addr, &psp->fw_pri_buf);

View File

@ -92,12 +92,12 @@ static void gfxhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev)
{
uint64_t value;
if (!amdgpu_sriov_vf(adev) || adev->asic_type <= CHIP_VEGA10) {
/* Program the AGP BAR */
WREG32_SOC15_RLC(GC, 0, mmMC_VM_AGP_BASE, 0);
WREG32_SOC15_RLC(GC, 0, mmMC_VM_AGP_BOT, adev->gmc.agp_start >> 24);
WREG32_SOC15_RLC(GC, 0, mmMC_VM_AGP_TOP, adev->gmc.agp_end >> 24);
if (!amdgpu_sriov_vf(adev) || adev->asic_type <= CHIP_VEGA10) {
/* Program the system aperture low logical page number. */
WREG32_SOC15_RLC(GC, 0, mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18);

View File

@ -807,6 +807,14 @@ struct kfd_process *kfd_create_process(struct file *filep)
if (thread->group_leader->mm != thread->mm)
return ERR_PTR(-EINVAL);
/* If the process just called exec(3), it is possible that the
* cleanup of the kfd_process (following the release of the mm
* of the old process image) is still in the cleanup work queue.
* Make sure to drain any job before trying to recreate any
* resource for this process.
*/
flush_workqueue(kfd_process_wq);
/*
* take kfd processes mutex before starting of process creation
* so there won't be a case where two threads of the same process
@ -819,14 +827,6 @@ struct kfd_process *kfd_create_process(struct file *filep)
if (process) {
pr_debug("Process already found\n");
} else {
/* If the process just called exec(3), it is possible that the
* cleanup of the kfd_process (following the release of the mm
* of the old process image) is still in the cleanup work queue.
* Make sure to drain any job before trying to recreate any
* resource for this process.
*/
flush_workqueue(kfd_process_wq);
process = create_process(thread);
if (IS_ERR(process))
goto out;

View File

@ -2710,11 +2710,6 @@ static int dm_resume(void *handle)
return 0;
}
/* leave display off for S4 sequence */
if (adev->in_s4)
return 0;
/* Recreate dc_state - DC invalidates it when setting power state to S3. */
dc_release_state(dm_state->context);
dm_state->context = dc_create_state(dm->dc);

View File

@ -248,6 +248,7 @@ static bool create_links(
link->link_id.type = OBJECT_TYPE_CONNECTOR;
link->link_id.id = CONNECTOR_ID_VIRTUAL;
link->link_id.enum_id = ENUM_ID_1;
link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
link->link_enc = kzalloc(sizeof(*link->link_enc), GFP_KERNEL);
if (!link->link_enc) {

View File

@ -386,11 +386,6 @@ bool dpp3_get_optimal_number_of_taps(
int min_taps_y, min_taps_c;
enum lb_memory_config lb_config;
if (scl_data->viewport.width > scl_data->h_active &&
dpp->ctx->dc->debug.max_downscale_src_width != 0 &&
scl_data->viewport.width > dpp->ctx->dc->debug.max_downscale_src_width)
return false;
/*
* Set default taps if none are provided
* From programming guide: taps = min{ ceil(2*H_RATIO,1), 8} for downscaling
@ -428,6 +423,12 @@ bool dpp3_get_optimal_number_of_taps(
else
scl_data->taps.h_taps_c = in_taps->h_taps_c;
// Avoid null data in the scl data with this early return, proceed non-adaptive calcualtion first
if (scl_data->viewport.width > scl_data->h_active &&
dpp->ctx->dc->debug.max_downscale_src_width != 0 &&
scl_data->viewport.width > dpp->ctx->dc->debug.max_downscale_src_width)
return false;
/*Ensure we can support the requested number of vtaps*/
min_taps_y = dc_fixpt_ceil(scl_data->ratios.vert);
min_taps_c = dc_fixpt_ceil(scl_data->ratios.vert_c);

View File

@ -105,7 +105,7 @@ static bool ast_get_vbios_mode_info(const struct drm_format_info *format,
return false;
}
switch (mode->crtc_hdisplay) {
switch (mode->hdisplay) {
case 640:
vbios_mode->enh_table = &res_640x480[refresh_rate_index];
break;
@ -116,7 +116,7 @@ static bool ast_get_vbios_mode_info(const struct drm_format_info *format,
vbios_mode->enh_table = &res_1024x768[refresh_rate_index];
break;
case 1280:
if (mode->crtc_vdisplay == 800)
if (mode->vdisplay == 800)
vbios_mode->enh_table = &res_1280x800[refresh_rate_index];
else
vbios_mode->enh_table = &res_1280x1024[refresh_rate_index];
@ -128,7 +128,7 @@ static bool ast_get_vbios_mode_info(const struct drm_format_info *format,
vbios_mode->enh_table = &res_1440x900[refresh_rate_index];
break;
case 1600:
if (mode->crtc_vdisplay == 900)
if (mode->vdisplay == 900)
vbios_mode->enh_table = &res_1600x900[refresh_rate_index];
else
vbios_mode->enh_table = &res_1600x1200[refresh_rate_index];
@ -137,7 +137,7 @@ static bool ast_get_vbios_mode_info(const struct drm_format_info *format,
vbios_mode->enh_table = &res_1680x1050[refresh_rate_index];
break;
case 1920:
if (mode->crtc_vdisplay == 1080)
if (mode->vdisplay == 1080)
vbios_mode->enh_table = &res_1920x1080[refresh_rate_index];
else
vbios_mode->enh_table = &res_1920x1200[refresh_rate_index];
@ -181,6 +181,7 @@ static bool ast_get_vbios_mode_info(const struct drm_format_info *format,
hborder = (vbios_mode->enh_table->flags & HBorder) ? 8 : 0;
vborder = (vbios_mode->enh_table->flags & VBorder) ? 8 : 0;
adjusted_mode->crtc_hdisplay = vbios_mode->enh_table->hde;
adjusted_mode->crtc_htotal = vbios_mode->enh_table->ht;
adjusted_mode->crtc_hblank_start = vbios_mode->enh_table->hde + hborder;
adjusted_mode->crtc_hblank_end = vbios_mode->enh_table->ht - hborder;
@ -190,6 +191,7 @@ static bool ast_get_vbios_mode_info(const struct drm_format_info *format,
vbios_mode->enh_table->hfp +
vbios_mode->enh_table->hsync);
adjusted_mode->crtc_vdisplay = vbios_mode->enh_table->vde;
adjusted_mode->crtc_vtotal = vbios_mode->enh_table->vt;
adjusted_mode->crtc_vblank_start = vbios_mode->enh_table->vde + vborder;
adjusted_mode->crtc_vblank_end = vbios_mode->enh_table->vt - vborder;

View File

@ -563,6 +563,30 @@ mode_valid(struct drm_atomic_state *state)
return 0;
}
static int drm_atomic_check_valid_clones(struct drm_atomic_state *state,
struct drm_crtc *crtc)
{
struct drm_encoder *drm_enc;
struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
crtc);
drm_for_each_encoder_mask(drm_enc, crtc->dev, crtc_state->encoder_mask) {
if (!drm_enc->possible_clones) {
DRM_DEBUG("enc%d possible_clones is 0\n", drm_enc->base.id);
continue;
}
if ((crtc_state->encoder_mask & drm_enc->possible_clones) !=
crtc_state->encoder_mask) {
DRM_DEBUG("crtc%d failed valid clone check for mask 0x%x\n",
crtc->base.id, crtc_state->encoder_mask);
return -EINVAL;
}
}
return 0;
}
/**
* drm_atomic_helper_check_modeset - validate state object for modeset changes
* @dev: DRM device
@ -729,6 +753,10 @@ drm_atomic_helper_check_modeset(struct drm_device *dev,
ret = drm_atomic_add_affected_planes(state, crtc);
if (ret != 0)
return ret;
ret = drm_atomic_check_valid_clones(state, crtc);
if (ret != 0)
return ret;
}
/*

View File

@ -5172,6 +5172,7 @@ drm_reset_display_info(struct drm_connector *connector)
info->has_hdmi_infoframe = false;
info->rgb_quant_range_selectable = false;
memset(&info->hdmi, 0, sizeof(info->hdmi));
memset(&connector->hdr_sink_metadata, 0, sizeof(connector->hdr_sink_metadata));
info->non_desktop = 0;
memset(&info->monitor_range, 0, sizeof(info->monitor_range));

View File

@ -222,7 +222,8 @@ int intel_vgpu_init_opregion(struct intel_vgpu *vgpu)
u8 *buf;
struct opregion_header *header;
struct vbt v;
const char opregion_signature[16] = OPREGION_SIGNATURE;
static_assert(sizeof(header->signature) == sizeof(OPREGION_SIGNATURE) - 1);
gvt_dbg_core("init vgpu%d opregion\n", vgpu->id);
vgpu_opregion(vgpu)->va = (void *)__get_free_pages(GFP_KERNEL |
@ -236,8 +237,9 @@ int intel_vgpu_init_opregion(struct intel_vgpu *vgpu)
/* emulated opregion with VBT mailbox only */
buf = (u8 *)vgpu_opregion(vgpu)->va;
header = (struct opregion_header *)buf;
memcpy(header->signature, opregion_signature,
sizeof(opregion_signature));
memcpy(header->signature, OPREGION_SIGNATURE, sizeof(header->signature));
header->size = 0x8;
header->opregion_ver = 0x02000000;
header->mboxes = MBOX_VBT;

View File

@ -346,12 +346,13 @@ static void mtk_dpi_config_swap_input(struct mtk_dpi *dpi, bool enable)
static void mtk_dpi_config_2n_h_fre(struct mtk_dpi *dpi)
{
if (dpi->conf->reg_h_fre_con)
mtk_dpi_mask(dpi, dpi->conf->reg_h_fre_con, H_FRE_2N, H_FRE_2N);
}
static void mtk_dpi_config_disable_edge(struct mtk_dpi *dpi)
{
if (dpi->conf->edge_sel_en)
if (dpi->conf->edge_sel_en && dpi->conf->reg_h_fre_con)
mtk_dpi_mask(dpi, dpi->conf->reg_h_fre_con, 0, EDGE_SEL_EN);
}

View File

@ -41,6 +41,10 @@
#define USB_VENDOR_ID_ACTIONSTAR 0x2101
#define USB_DEVICE_ID_ACTIONSTAR_1011 0x1011
#define USB_VENDOR_ID_ADATA_XPG 0x125f
#define USB_VENDOR_ID_ADATA_XPG_WL_GAMING_MOUSE 0x7505
#define USB_VENDOR_ID_ADATA_XPG_WL_GAMING_MOUSE_DONGLE 0x7506
#define USB_VENDOR_ID_ADS_TECH 0x06e1
#define USB_DEVICE_ID_ADS_TECH_RADIO_SI470X 0xa155

View File

@ -27,6 +27,8 @@
static const struct hid_device_id hid_quirks[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_AASHIMA, USB_DEVICE_ID_AASHIMA_GAMEPAD), HID_QUIRK_BADPAD },
{ HID_USB_DEVICE(USB_VENDOR_ID_AASHIMA, USB_DEVICE_ID_AASHIMA_PREDATOR), HID_QUIRK_BADPAD },
{ HID_USB_DEVICE(USB_VENDOR_ID_ADATA_XPG, USB_VENDOR_ID_ADATA_XPG_WL_GAMING_MOUSE), HID_QUIRK_ALWAYS_POLL },
{ HID_USB_DEVICE(USB_VENDOR_ID_ADATA_XPG, USB_VENDOR_ID_ADATA_XPG_WL_GAMING_MOUSE_DONGLE), HID_QUIRK_ALWAYS_POLL },
{ HID_USB_DEVICE(USB_VENDOR_ID_AFATECH, USB_DEVICE_ID_AFATECH_AF9016), HID_QUIRK_FULLSPEED_INTERVAL },
{ HID_USB_DEVICE(USB_VENDOR_ID_AIREN, USB_DEVICE_ID_AIREN_SLIMPLUS), HID_QUIRK_NOGET },
{ HID_USB_DEVICE(USB_VENDOR_ID_AKAI_09E8, USB_DEVICE_ID_AKAI_09E8_MIDIMIX), HID_QUIRK_NO_INIT_REPORTS },

View File

@ -160,7 +160,7 @@ static int usb_kbd_event(struct input_dev *dev, unsigned int type,
return -1;
spin_lock_irqsave(&kbd->leds_lock, flags);
kbd->newleds = (!!test_bit(LED_KANA, dev->led) << 3) | (!!test_bit(LED_COMPOSE, dev->led) << 3) |
kbd->newleds = (!!test_bit(LED_KANA, dev->led) << 4) | (!!test_bit(LED_COMPOSE, dev->led) << 3) |
(!!test_bit(LED_SCROLLL, dev->led) << 2) | (!!test_bit(LED_CAPSL, dev->led) << 1) |
(!!test_bit(LED_NUML, dev->led));

View File

@ -394,7 +394,12 @@ static int gpio_fan_set_cur_state(struct thermal_cooling_device *cdev,
if (state >= fan_data->num_speed)
return -EINVAL;
mutex_lock(&fan_data->lock);
set_fan_speed(fan_data, state);
mutex_unlock(&fan_data->lock);
return 0;
}
@ -490,7 +495,11 @@ MODULE_DEVICE_TABLE(of, of_gpio_fan_match);
static void gpio_fan_stop(void *data)
{
struct gpio_fan_data *fan_data = data;
mutex_lock(&fan_data->lock);
set_fan_speed(data, 0);
mutex_unlock(&fan_data->lock);
}
static int gpio_fan_probe(struct platform_device *pdev)
@ -564,7 +573,9 @@ static int gpio_fan_suspend(struct device *dev)
if (fan_data->gpios) {
fan_data->resume_speed = fan_data->speed_index;
mutex_lock(&fan_data->lock);
set_fan_speed(fan_data, 0);
mutex_unlock(&fan_data->lock);
}
return 0;
@ -574,8 +585,11 @@ static int gpio_fan_resume(struct device *dev)
{
struct gpio_fan_data *fan_data = dev_get_drvdata(dev);
if (fan_data->gpios)
if (fan_data->gpios) {
mutex_lock(&fan_data->lock);
set_fan_speed(fan_data, fan_data->resume_speed);
mutex_unlock(&fan_data->lock);
}
return 0;
}

View File

@ -110,7 +110,7 @@ struct xgene_hwmon_dev {
phys_addr_t comm_base_addr;
void *pcc_comm_addr;
u64 usecs_lat;
unsigned int usecs_lat;
};
/*

View File

@ -1508,7 +1508,10 @@ static int i2c_pxa_probe(struct platform_device *dev)
i2c->adap.name);
}
clk_prepare_enable(i2c->clk);
ret = clk_prepare_enable(i2c->clk);
if (ret)
return dev_err_probe(&dev->dev, ret,
"failed to enable clock\n");
if (i2c->use_pio) {
i2c->adap.algo = &i2c_pxa_pio_algorithm;

View File

@ -14,6 +14,7 @@
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/interconnect.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
@ -150,6 +151,8 @@
/* TAG length for DATA READ in RX FIFO */
#define READ_RX_TAGS_LEN 2
#define QUP_BUS_WIDTH 8
static unsigned int scl_freq;
module_param_named(scl_freq, scl_freq, uint, 0444);
MODULE_PARM_DESC(scl_freq, "SCL frequency override");
@ -227,6 +230,7 @@ struct qup_i2c_dev {
int irq;
struct clk *clk;
struct clk *pclk;
struct icc_path *icc_path;
struct i2c_adapter adap;
int clk_ctl;
@ -255,6 +259,10 @@ struct qup_i2c_dev {
/* To configure when bus is in run state */
u32 config_run;
/* bandwidth votes */
u32 src_clk_freq;
u32 cur_bw_clk_freq;
/* dma parameters */
bool is_dma;
/* To check if the current transfer is using DMA */
@ -453,6 +461,23 @@ static int qup_i2c_bus_active(struct qup_i2c_dev *qup, int len)
return ret;
}
static int qup_i2c_vote_bw(struct qup_i2c_dev *qup, u32 clk_freq)
{
u32 needed_peak_bw;
int ret;
if (qup->cur_bw_clk_freq == clk_freq)
return 0;
needed_peak_bw = Bps_to_icc(clk_freq * QUP_BUS_WIDTH);
ret = icc_set_bw(qup->icc_path, 0, needed_peak_bw);
if (ret)
return ret;
qup->cur_bw_clk_freq = clk_freq;
return 0;
}
static void qup_i2c_write_tx_fifo_v1(struct qup_i2c_dev *qup)
{
struct qup_i2c_block *blk = &qup->blk;
@ -840,6 +865,10 @@ static int qup_i2c_bam_xfer(struct i2c_adapter *adap, struct i2c_msg *msg,
int ret = 0;
int idx = 0;
ret = qup_i2c_vote_bw(qup, qup->src_clk_freq);
if (ret)
return ret;
enable_irq(qup->irq);
ret = qup_i2c_req_dma(qup);
@ -1645,6 +1674,7 @@ static void qup_i2c_disable_clocks(struct qup_i2c_dev *qup)
config = readl(qup->base + QUP_CONFIG);
config |= QUP_CLOCK_AUTO_GATE;
writel(config, qup->base + QUP_CONFIG);
qup_i2c_vote_bw(qup, 0);
clk_disable_unprepare(qup->pclk);
}
@ -1745,6 +1775,11 @@ static int qup_i2c_probe(struct platform_device *pdev)
goto fail_dma;
}
qup->is_dma = true;
qup->icc_path = devm_of_icc_get(&pdev->dev, NULL);
if (IS_ERR(qup->icc_path))
return dev_err_probe(&pdev->dev, PTR_ERR(qup->icc_path),
"failed to get interconnect path\n");
}
nodma:
@ -1793,6 +1828,7 @@ nodma:
qup_i2c_enable_clocks(qup);
src_clk_freq = clk_get_rate(qup->clk);
}
qup->src_clk_freq = src_clk_freq;
/*
* Bootloaders might leave a pending interrupt on certain QUP's,


@ -437,6 +437,8 @@ static void svc_i3c_master_ibi_work(struct work_struct *work)
queue_work(master->base.wq, &master->hj_work);
break;
case SVC_I3C_MSTATUS_IBITYPE_MASTER_REQUEST:
svc_i3c_master_emit_stop(master);
break;
default:
break;
}


@ -80,9 +80,12 @@ unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem,
unsigned long pgsz_bitmap,
unsigned long virt)
{
struct scatterlist *sg;
unsigned long curr_len = 0;
dma_addr_t curr_base = ~0;
unsigned long va, pgoff;
struct scatterlist *sg;
dma_addr_t mask;
dma_addr_t end;
int i;
umem->iova = va = virt;
@ -107,17 +110,30 @@ unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem,
pgoff = umem->address & ~PAGE_MASK;
for_each_sgtable_dma_sg(&umem->sgt_append.sgt, sg, i) {
/* Walk SGL and reduce max page size if VA/PA bits differ
* for any address.
/* If the current entry is physically contiguous with the previous
* one, no need to take its start addresses into consideration.
*/
mask |= (sg_dma_address(sg) + pgoff) ^ va;
va += sg_dma_len(sg) - pgoff;
/* Except for the last entry, the ending iova alignment sets
* the maximum possible page size as the low bits of the iova
* must be zero when starting the next chunk.
if (check_add_overflow(curr_base, curr_len, &end) ||
end != sg_dma_address(sg)) {
curr_base = sg_dma_address(sg);
curr_len = 0;
/* Reduce max page size if VA/PA bits differ */
mask |= (curr_base + pgoff) ^ va;
/* The alignment of any VA matching a discontinuity point
* in the physical memory sets the maximum possible page
* size as this must be a starting point of a new page that
* needs to be aligned.
*/
if (i != (umem->sgt_append.sgt.nents - 1))
if (i != 0)
mask |= va;
}
curr_len += sg_dma_len(sg);
va += sg_dma_len(sg) - pgoff;
pgoff = 0;
}
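
The rewritten loop only folds an SGL entry into the current run when the previous run ends exactly where the new entry starts, with the addition checked for wraparound. The same test in plain C, using __builtin_add_overflow(), the compiler primitive behind the kernel's check_add_overflow():

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    static bool contiguous(uint64_t base, uint64_t len, uint64_t next_base)
    {
        uint64_t end;

        if (__builtin_add_overflow(base, len, &end))
            return false;            /* wrapped: must start a new chunk */
        return end == next_base;     /* next entry starts where we ended */
    }

    int main(void)
    {
        printf("%d\n", contiguous(0x1000, 0x1000, 0x2000)); /* 1: merges */
        printf("%d\n", contiguous(0x1000, 0x1000, 0x4000)); /* 0: hole */
        printf("%d\n", contiguous(UINT64_MAX, 2, 1));       /* 0: overflow */
        return 0;
    }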


@ -718,8 +718,8 @@ static int ib_uverbs_reg_mr(struct uverbs_attr_bundle *attrs)
goto err_free;
pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd.pd_handle, attrs);
if (!pd) {
ret = -EINVAL;
if (IS_ERR(pd)) {
ret = PTR_ERR(pd);
goto err_free;
}
@ -809,8 +809,8 @@ static int ib_uverbs_rereg_mr(struct uverbs_attr_bundle *attrs)
if (cmd.flags & IB_MR_REREG_PD) {
new_pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd.pd_handle,
attrs);
if (!new_pd) {
ret = -EINVAL;
if (IS_ERR(new_pd)) {
ret = PTR_ERR(new_pd);
goto put_uobjs;
}
} else {
@ -919,8 +919,8 @@ static int ib_uverbs_alloc_mw(struct uverbs_attr_bundle *attrs)
return PTR_ERR(uobj);
pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd.pd_handle, attrs);
if (!pd) {
ret = -EINVAL;
if (IS_ERR(pd)) {
ret = PTR_ERR(pd);
goto err_free;
}
@ -1127,8 +1127,8 @@ static int ib_uverbs_resize_cq(struct uverbs_attr_bundle *attrs)
return ret;
cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd.cq_handle, attrs);
if (!cq)
return -EINVAL;
if (IS_ERR(cq))
return PTR_ERR(cq);
ret = cq->device->ops.resize_cq(cq, cmd.cqe, &attrs->driver_udata);
if (ret)
@ -1189,8 +1189,8 @@ static int ib_uverbs_poll_cq(struct uverbs_attr_bundle *attrs)
return ret;
cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd.cq_handle, attrs);
if (!cq)
return -EINVAL;
if (IS_ERR(cq))
return PTR_ERR(cq);
/* we copy a struct ib_uverbs_poll_cq_resp to user space */
header_ptr = attrs->ucore.outbuf;
@ -1238,8 +1238,8 @@ static int ib_uverbs_req_notify_cq(struct uverbs_attr_bundle *attrs)
return ret;
cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd.cq_handle, attrs);
if (!cq)
return -EINVAL;
if (IS_ERR(cq))
return PTR_ERR(cq);
ib_req_notify_cq(cq, cmd.solicited_only ?
IB_CQ_SOLICITED : IB_CQ_NEXT_COMP);
@ -1321,8 +1321,8 @@ static int create_qp(struct uverbs_attr_bundle *attrs,
ind_tbl = uobj_get_obj_read(rwq_ind_table,
UVERBS_OBJECT_RWQ_IND_TBL,
cmd->rwq_ind_tbl_handle, attrs);
if (!ind_tbl) {
ret = -EINVAL;
if (IS_ERR(ind_tbl)) {
ret = PTR_ERR(ind_tbl);
goto err_put;
}
@ -1360,8 +1360,10 @@ static int create_qp(struct uverbs_attr_bundle *attrs,
if (cmd->is_srq) {
srq = uobj_get_obj_read(srq, UVERBS_OBJECT_SRQ,
cmd->srq_handle, attrs);
if (!srq || srq->srq_type == IB_SRQT_XRC) {
ret = -EINVAL;
if (IS_ERR(srq) ||
srq->srq_type == IB_SRQT_XRC) {
ret = IS_ERR(srq) ? PTR_ERR(srq) :
-EINVAL;
goto err_put;
}
}
@ -1371,23 +1373,29 @@ static int create_qp(struct uverbs_attr_bundle *attrs,
rcq = uobj_get_obj_read(
cq, UVERBS_OBJECT_CQ,
cmd->recv_cq_handle, attrs);
if (!rcq) {
ret = -EINVAL;
if (IS_ERR(rcq)) {
ret = PTR_ERR(rcq);
goto err_put;
}
}
}
}
if (has_sq)
if (has_sq) {
scq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ,
cmd->send_cq_handle, attrs);
if (IS_ERR(scq)) {
ret = PTR_ERR(scq);
goto err_put;
}
}
if (!ind_tbl && cmd->qp_type != IB_QPT_XRC_INI)
rcq = rcq ?: scq;
pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd->pd_handle,
attrs);
if (!pd || (!scq && has_sq)) {
ret = -EINVAL;
if (IS_ERR(pd)) {
ret = PTR_ERR(pd);
goto err_put;
}
@ -1483,18 +1491,18 @@ static int create_qp(struct uverbs_attr_bundle *attrs,
err_put:
if (!IS_ERR(xrcd_uobj))
uobj_put_read(xrcd_uobj);
if (pd)
if (!IS_ERR_OR_NULL(pd))
uobj_put_obj_read(pd);
if (scq)
if (!IS_ERR_OR_NULL(scq))
rdma_lookup_put_uobject(&scq->uobject->uevent.uobject,
UVERBS_LOOKUP_READ);
if (rcq && rcq != scq)
if (!IS_ERR_OR_NULL(rcq) && rcq != scq)
rdma_lookup_put_uobject(&rcq->uobject->uevent.uobject,
UVERBS_LOOKUP_READ);
if (srq)
if (!IS_ERR_OR_NULL(srq))
rdma_lookup_put_uobject(&srq->uobject->uevent.uobject,
UVERBS_LOOKUP_READ);
if (ind_tbl)
if (!IS_ERR_OR_NULL(ind_tbl))
uobj_put_obj_read(ind_tbl);
uobj_alloc_abort(&obj->uevent.uobject, attrs);
@ -1656,8 +1664,8 @@ static int ib_uverbs_query_qp(struct uverbs_attr_bundle *attrs)
}
qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, attrs);
if (!qp) {
ret = -EINVAL;
if (IS_ERR(qp)) {
ret = PTR_ERR(qp);
goto out;
}
@ -1762,8 +1770,8 @@ static int modify_qp(struct uverbs_attr_bundle *attrs,
qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd->base.qp_handle,
attrs);
if (!qp) {
ret = -EINVAL;
if (IS_ERR(qp)) {
ret = PTR_ERR(qp);
goto out;
}
@ -2028,8 +2036,8 @@ static int ib_uverbs_post_send(struct uverbs_attr_bundle *attrs)
return -ENOMEM;
qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, attrs);
if (!qp) {
ret = -EINVAL;
if (IS_ERR(qp)) {
ret = PTR_ERR(qp);
goto out;
}
@ -2066,9 +2074,9 @@ static int ib_uverbs_post_send(struct uverbs_attr_bundle *attrs)
ud->ah = uobj_get_obj_read(ah, UVERBS_OBJECT_AH,
user_wr->wr.ud.ah, attrs);
if (!ud->ah) {
if (IS_ERR(ud->ah)) {
ret = PTR_ERR(ud->ah);
kfree(ud);
ret = -EINVAL;
goto out_put;
}
ud->remote_qpn = user_wr->wr.ud.remote_qpn;
@ -2305,8 +2313,8 @@ static int ib_uverbs_post_recv(struct uverbs_attr_bundle *attrs)
return PTR_ERR(wr);
qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, attrs);
if (!qp) {
ret = -EINVAL;
if (IS_ERR(qp)) {
ret = PTR_ERR(qp);
goto out;
}
@ -2356,8 +2364,8 @@ static int ib_uverbs_post_srq_recv(struct uverbs_attr_bundle *attrs)
return PTR_ERR(wr);
srq = uobj_get_obj_read(srq, UVERBS_OBJECT_SRQ, cmd.srq_handle, attrs);
if (!srq) {
ret = -EINVAL;
if (IS_ERR(srq)) {
ret = PTR_ERR(srq);
goto out;
}
@ -2413,8 +2421,8 @@ static int ib_uverbs_create_ah(struct uverbs_attr_bundle *attrs)
}
pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd.pd_handle, attrs);
if (!pd) {
ret = -EINVAL;
if (IS_ERR(pd)) {
ret = PTR_ERR(pd);
goto err;
}
@ -2483,8 +2491,8 @@ static int ib_uverbs_attach_mcast(struct uverbs_attr_bundle *attrs)
return ret;
qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, attrs);
if (!qp)
return -EINVAL;
if (IS_ERR(qp))
return PTR_ERR(qp);
obj = qp->uobject;
@ -2533,8 +2541,8 @@ static int ib_uverbs_detach_mcast(struct uverbs_attr_bundle *attrs)
return ret;
qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, attrs);
if (!qp)
return -EINVAL;
if (IS_ERR(qp))
return PTR_ERR(qp);
obj = qp->uobject;
mutex_lock(&obj->mcast_lock);
@ -2668,8 +2676,8 @@ static int kern_spec_to_ib_spec_action(struct uverbs_attr_bundle *attrs,
UVERBS_OBJECT_FLOW_ACTION,
kern_spec->action.handle,
attrs);
if (!ib_spec->action.act)
return -EINVAL;
if (IS_ERR(ib_spec->action.act))
return PTR_ERR(ib_spec->action.act);
ib_spec->action.size =
sizeof(struct ib_flow_spec_action_handle);
flow_resources_add(uflow_res,
@ -2686,8 +2694,8 @@ static int kern_spec_to_ib_spec_action(struct uverbs_attr_bundle *attrs,
UVERBS_OBJECT_COUNTERS,
kern_spec->flow_count.handle,
attrs);
if (!ib_spec->flow_count.counters)
return -EINVAL;
if (IS_ERR(ib_spec->flow_count.counters))
return PTR_ERR(ib_spec->flow_count.counters);
ib_spec->flow_count.size =
sizeof(struct ib_flow_spec_action_count);
flow_resources_add(uflow_res,
@ -2905,14 +2913,14 @@ static int ib_uverbs_ex_create_wq(struct uverbs_attr_bundle *attrs)
return PTR_ERR(obj);
pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd.pd_handle, attrs);
if (!pd) {
err = -EINVAL;
if (IS_ERR(pd)) {
err = PTR_ERR(pd);
goto err_uobj;
}
cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd.cq_handle, attrs);
if (!cq) {
err = -EINVAL;
if (IS_ERR(cq)) {
err = PTR_ERR(cq);
goto err_put_pd;
}
@ -3013,8 +3021,8 @@ static int ib_uverbs_ex_modify_wq(struct uverbs_attr_bundle *attrs)
return -EINVAL;
wq = uobj_get_obj_read(wq, UVERBS_OBJECT_WQ, cmd.wq_handle, attrs);
if (!wq)
return -EINVAL;
if (IS_ERR(wq))
return PTR_ERR(wq);
if (cmd.attr_mask & IB_WQ_FLAGS) {
wq_attr.flags = cmd.flags;
@ -3097,8 +3105,8 @@ static int ib_uverbs_ex_create_rwq_ind_table(struct uverbs_attr_bundle *attrs)
num_read_wqs++) {
wq = uobj_get_obj_read(wq, UVERBS_OBJECT_WQ,
wqs_handles[num_read_wqs], attrs);
if (!wq) {
err = -EINVAL;
if (IS_ERR(wq)) {
err = PTR_ERR(wq);
goto put_wqs;
}
@ -3253,8 +3261,8 @@ static int ib_uverbs_ex_create_flow(struct uverbs_attr_bundle *attrs)
}
qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, attrs);
if (!qp) {
err = -EINVAL;
if (IS_ERR(qp)) {
err = PTR_ERR(qp);
goto err_uobj;
}
@ -3400,15 +3408,15 @@ static int __uverbs_create_xsrq(struct uverbs_attr_bundle *attrs,
if (ib_srq_has_cq(cmd->srq_type)) {
attr.ext.cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ,
cmd->cq_handle, attrs);
if (!attr.ext.cq) {
ret = -EINVAL;
if (IS_ERR(attr.ext.cq)) {
ret = PTR_ERR(attr.ext.cq);
goto err_put_xrcd;
}
}
pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd->pd_handle, attrs);
if (!pd) {
ret = -EINVAL;
if (IS_ERR(pd)) {
ret = PTR_ERR(pd);
goto err_put_cq;
}
@ -3515,8 +3523,8 @@ static int ib_uverbs_modify_srq(struct uverbs_attr_bundle *attrs)
return ret;
srq = uobj_get_obj_read(srq, UVERBS_OBJECT_SRQ, cmd.srq_handle, attrs);
if (!srq)
return -EINVAL;
if (IS_ERR(srq))
return PTR_ERR(srq);
attr.max_wr = cmd.max_wr;
attr.srq_limit = cmd.srq_limit;
@ -3543,8 +3551,8 @@ static int ib_uverbs_query_srq(struct uverbs_attr_bundle *attrs)
return ret;
srq = uobj_get_obj_read(srq, UVERBS_OBJECT_SRQ, cmd.srq_handle, attrs);
if (!srq)
return -EINVAL;
if (IS_ERR(srq))
return PTR_ERR(srq);
ret = ib_query_srq(srq, &attr);
@ -3669,8 +3677,8 @@ static int ib_uverbs_ex_modify_cq(struct uverbs_attr_bundle *attrs)
return -EOPNOTSUPP;
cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd.cq_handle, attrs);
if (!cq)
return -EINVAL;
if (IS_ERR(cq))
return PTR_ERR(cq);
ret = rdma_set_cq_moderation(cq, cmd.attr.cq_count, cmd.attr.cq_period);
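
Every conversion in this file swaps a NULL check for the kernel's error-pointer convention, so the real cause from the lookup is propagated instead of a blanket -EINVAL. A userspace model of ERR_PTR()/IS_ERR()/PTR_ERR(), assuming as the kernel does that error codes stay below 4096:

    #include <stdio.h>

    #define MAX_ERRNO 4095   /* same bound the kernel uses */

    static inline void *ERR_PTR(long error) { return (void *)error; }
    static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
    static inline int IS_ERR(const void *ptr)
    {
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
    }

    /* a lookup that reports why it failed, not just that it failed */
    static void *get_pd(int fail)
    {
        static int pd = 42;
        return fail ? ERR_PTR(-22 /* -EINVAL */) : (void *)&pd;
    }

    int main(void)
    {
        void *pd = get_pd(1);

        if (IS_ERR(pd)) {
            printf("lookup failed: %ld\n", PTR_ERR(pd));  /* -22 */
            return 1;
        }
        return 0;
    }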


@ -2967,22 +2967,23 @@ EXPORT_SYMBOL(__rdma_block_iter_start);
bool __rdma_block_iter_next(struct ib_block_iter *biter)
{
unsigned int block_offset;
unsigned int sg_delta;
unsigned int delta;
if (!biter->__sg_nents || !biter->__sg)
return false;
biter->__dma_addr = sg_dma_address(biter->__sg) + biter->__sg_advance;
block_offset = biter->__dma_addr & (BIT_ULL(biter->__pg_bit) - 1);
sg_delta = BIT_ULL(biter->__pg_bit) - block_offset;
delta = BIT_ULL(biter->__pg_bit) - block_offset;
if (sg_dma_len(biter->__sg) - biter->__sg_advance > sg_delta) {
biter->__sg_advance += sg_delta;
} else {
while (biter->__sg_nents && biter->__sg &&
sg_dma_len(biter->__sg) - biter->__sg_advance <= delta) {
delta -= sg_dma_len(biter->__sg) - biter->__sg_advance;
biter->__sg_advance = 0;
biter->__sg = sg_next(biter->__sg);
biter->__sg_nents--;
}
biter->__sg_advance += delta;
return true;
}
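
The fix lets a single iterator step cross several short scatterlist entries instead of assuming the step always ends inside the current one. A self-contained analogue with a plain array in place of the DMA scatterlist (segment sizes are arbitrary):

    #include <stdio.h>

    struct seg { unsigned long len; };

    struct iter {
        const struct seg *sg;
        int nents;
        unsigned long advance;   /* offset consumed within *sg */
    };

    /* emit one fixed-size block per call; the while loop is the fix:
     * it can now step across several short segments in one call */
    static int next_block(struct iter *it, unsigned long block)
    {
        unsigned long delta = block;

        if (!it->nents || !it->sg)
            return 0;
        while (it->nents && it->sg->len - it->advance <= delta) {
            delta -= it->sg->len - it->advance;
            it->advance = 0;
            it->sg++;
            it->nents--;
        }
        it->advance += delta;
        return 1;
    }

    int main(void)
    {
        const struct seg sgl[] = { { 4096 }, { 4096 }, { 8192 } };
        struct iter it = { sgl, 3, 0 };
        int n = 0;

        while (next_block(&it, 8192))   /* 16 KiB total -> 2 blocks */
            n++;
        printf("%d blocks\n", n);
        return 0;
    }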


@ -350,11 +350,12 @@ struct mbox_chan *mbox_request_channel(struct mbox_client *cl, int index)
mutex_lock(&con_mutex);
if (of_parse_phandle_with_args(dev->of_node, "mboxes",
"#mbox-cells", index, &spec)) {
ret = of_parse_phandle_with_args(dev->of_node, "mboxes", "#mbox-cells",
index, &spec);
if (ret) {
dev_dbg(dev, "%s: can't parse \"mboxes\" property\n", __func__);
mutex_unlock(&con_mutex);
return ERR_PTR(-ENODEV);
return ERR_PTR(ret);
}
chan = ERR_PTR(-EPROBE_DEFER);


@ -2883,6 +2883,27 @@ static dm_cblock_t get_cache_dev_size(struct cache *cache)
return to_cblock(size);
}
static bool can_resume(struct cache *cache)
{
/*
* Disallow retrying the resume operation for devices that failed the
* first resume attempt, as the failure leaves the policy object partially
* initialized. Retrying could trigger BUG_ON when loading cache mappings
* into the incomplete policy object.
*/
if (cache->sized && !cache->loaded_mappings) {
if (get_cache_mode(cache) != CM_WRITE)
DMERR("%s: unable to resume a failed-loaded cache, please check metadata.",
cache_device_name(cache));
else
DMERR("%s: unable to resume cache due to missing proper cache table reload",
cache_device_name(cache));
return false;
}
return true;
}
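
Reduced to its logic, can_resume() refuses a second resume when an earlier one sized the device but never finished loading mappings. The guard as a tiny standalone function:

    #include <stdbool.h>
    #include <stdio.h>

    struct cache_state {
        bool sized;            /* a resume got far enough to size the device */
        bool loaded_mappings;  /* ...but never finished loading mappings */
    };

    static bool can_resume(const struct cache_state *c)
    {
        /* a failed first resume leaves the policy half-built: refuse */
        return !(c->sized && !c->loaded_mappings);
    }

    int main(void)
    {
        struct cache_state failed = { .sized = true, .loaded_mappings = false };
        struct cache_state fresh  = { 0 };

        printf("%d %d\n", can_resume(&failed), can_resume(&fresh)); /* 0 1 */
        return 0;
    }
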
static bool can_resize(struct cache *cache, dm_cblock_t new_size)
{
if (from_cblock(new_size) > from_cblock(cache->cache_size)) {
@ -2931,6 +2952,9 @@ static int cache_preresume(struct dm_target *ti)
struct cache *cache = ti->private;
dm_cblock_t csize = get_cache_dev_size(cache);
if (!can_resume(cache))
return -EINVAL;
/*
* Check to see if the cache has resized.
*/


@ -661,6 +661,10 @@ int dm_table_add_target(struct dm_table *t, const char *type,
DMERR("%s: zero-length target", dm_device_name(t->md));
return -EINVAL;
}
if (start + len < start || start + len > LLONG_MAX >> SECTOR_SHIFT) {
DMERR("%s: too large device", dm_device_name(t->md));
return -EINVAL;
}
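
The new bounds check rejects a target whose sector range either wraps around or ends past the largest byte offset a signed 64-bit loff_t can hold. The same arithmetic in isolation:

    #include <limits.h>
    #include <stdio.h>

    #define SECTOR_SHIFT 9

    static int range_ok(unsigned long long start, unsigned long long len)
    {
        if (start + len < start)   /* unsigned wraparound */
            return 0;
        if (start + len > (unsigned long long)(LLONG_MAX >> SECTOR_SHIFT))
            return 0;              /* byte offset would overflow loff_t */
        return 1;
    }

    int main(void)
    {
        printf("%d\n", range_ok(0, 1ULL << 32));   /* 1: fits */
        printf("%d\n", range_ok(~0ULL - 4, 16));   /* 0: wraps */
        printf("%d\n", range_ok(0, ~0ULL >> 1));   /* 0: too large */
        return 0;
    }
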
tgt->type = dm_get_target_type(type);
if (!tgt->type) {


@ -219,12 +219,14 @@ static int csid_set_stream(struct v4l2_subdev *sd, int enable)
int ret;
if (enable) {
if (csid->testgen.nmodes != CSID_PAYLOAD_MODE_DISABLED) {
ret = v4l2_ctrl_handler_setup(&csid->ctrls);
if (ret < 0) {
dev_err(csid->camss->dev,
"could not sync v4l2 controls: %d\n", ret);
return ret;
}
}
if (!csid->testgen.enabled &&
!media_entity_remote_pad(&csid->pads[MSM_CSID_PAD_SINK]))
@ -298,7 +300,8 @@ static void csid_try_format(struct csid_device *csid,
break;
case MSM_CSID_PAD_SRC:
if (csid->testgen_mode->cur.val == 0) {
if (csid->testgen.nmodes == CSID_PAYLOAD_MODE_DISABLED ||
csid->testgen_mode->cur.val == 0) {
/* Test generator is disabled, */
/* keep pad formats in sync */
u32 code = fmt->code;
@ -348,7 +351,8 @@ static int csid_enum_mbus_code(struct v4l2_subdev *sd,
code->code = csid->formats[code->index].code;
} else {
if (csid->testgen_mode->cur.val == 0) {
if (csid->testgen.nmodes == CSID_PAYLOAD_MODE_DISABLED ||
csid->testgen_mode->cur.val == 0) {
struct v4l2_mbus_framefmt *sink_fmt;
sink_fmt = __csid_get_format(csid, sd_state,
@ -707,7 +711,8 @@ static int csid_link_setup(struct media_entity *entity,
/* If test generator is enabled */
/* do not allow a link from CSIPHY to CSID */
if (csid->testgen_mode->cur.val != 0)
if (csid->testgen.nmodes != CSID_PAYLOAD_MODE_DISABLED &&
csid->testgen_mode->cur.val != 0)
return -EBUSY;
sd = media_entity_to_v4l2_subdev(remote->entity);
@ -800,13 +805,15 @@ int msm_csid_register_entity(struct csid_device *csid,
MSM_CSID_NAME, csid->id);
v4l2_set_subdevdata(sd, csid);
if (csid->testgen.nmodes != CSID_PAYLOAD_MODE_DISABLED) {
ret = v4l2_ctrl_handler_init(&csid->ctrls, 1);
if (ret < 0) {
dev_err(dev, "Failed to init ctrl handler: %d\n", ret);
return ret;
}
csid->testgen_mode = v4l2_ctrl_new_std_menu_items(&csid->ctrls,
csid->testgen_mode =
v4l2_ctrl_new_std_menu_items(&csid->ctrls,
&csid_ctrl_ops, V4L2_CID_TEST_PATTERN,
csid->testgen.nmodes, 0, 0,
csid->testgen.modes);
@ -818,6 +825,7 @@ int msm_csid_register_entity(struct csid_device *csid,
}
csid->subdev.ctrl_handler = &csid->ctrls;
}
ret = csid_init_formats(sd, NULL);
if (ret < 0) {
@ -848,6 +856,7 @@ int msm_csid_register_entity(struct csid_device *csid,
media_cleanup:
media_entity_cleanup(&sd->entity);
free_ctrl:
if (csid->testgen.nmodes != CSID_PAYLOAD_MODE_DISABLED)
v4l2_ctrl_handler_free(&csid->ctrls);
return ret;
@ -861,5 +870,6 @@ void msm_csid_unregister_entity(struct csid_device *csid)
{
v4l2_device_unregister_subdev(&csid->subdev);
media_entity_cleanup(&csid->subdev.entity);
if (csid->testgen.nmodes != CSID_PAYLOAD_MODE_DISABLED)
v4l2_ctrl_handler_free(&csid->ctrls);
}


@ -811,13 +811,12 @@ static int c8sectpfe_probe(struct platform_device *pdev)
}
tsin->i2c_adapter =
of_find_i2c_adapter_by_node(i2c_bus);
of_node_put(i2c_bus);
if (!tsin->i2c_adapter) {
dev_err(&pdev->dev, "No i2c adapter found\n");
of_node_put(i2c_bus);
ret = -ENODEV;
goto err_node_put;
}
of_node_put(i2c_bus);
tsin->rst_gpio = of_get_named_gpio(child, "reset-gpios", 0);


@ -1722,6 +1722,8 @@ static void cx231xx_video_dev_init(
vfd->lock = &dev->lock;
vfd->release = video_device_release_empty;
vfd->ctrl_handler = &dev->mpeg_ctrl_handler.hdl;
vfd->device_caps = V4L2_CAP_READWRITE | V4L2_CAP_STREAMING |
V4L2_CAP_VIDEO_CAPTURE;
video_set_drvdata(vfd, dev);
if (dev->tuner_type == TUNER_ABSENT) {
v4l2_disable_ioctl(vfd, VIDIOC_G_FREQUENCY);


@ -35,6 +35,12 @@ static int uvc_ioctl_ctrl_map(struct uvc_video_chain *chain,
unsigned int size;
int ret;
if (xmap->data_type > UVC_CTRL_DATA_TYPE_BITMASK) {
uvc_dbg(chain->dev, CONTROL,
"Unsupported UVC data type %u\n", xmap->data_type);
return -EINVAL;
}
map = kzalloc(sizeof(*map), GFP_KERNEL);
if (map == NULL)
return -ENOMEM;


@ -314,6 +314,8 @@ static int call_enum_dv_timings(struct v4l2_subdev *sd,
static int call_get_mbus_config(struct v4l2_subdev *sd, unsigned int pad,
struct v4l2_mbus_config *config)
{
memset(config, 0, sizeof(*config));
return check_pad(sd, pad) ? :
sd->ops->pad->get_mbus_config(sd, pad, config);
}


@ -677,8 +677,12 @@ static void sdhci_intel_set_power(struct sdhci_host *host, unsigned char mode,
sdhci_set_power(host, mode, vdd);
if (mode == MMC_POWER_OFF)
if (mode == MMC_POWER_OFF) {
if (slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_APL_SD ||
slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_BYT_SD)
usleep_range(15000, 17500);
return;
}
/*
* Bus power might not enable after D3 -> D0 transition due to the


@ -2009,10 +2009,15 @@ void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
host->mmc->actual_clock = 0;
sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
if (clk & SDHCI_CLOCK_CARD_EN)
sdhci_writew(host, clk & ~SDHCI_CLOCK_CARD_EN,
SDHCI_CLOCK_CONTROL);
if (clock == 0)
if (clock == 0) {
sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
return;
}
clk = sdhci_calc_clk(host, clock, &host->mmc->actual_clock);
sdhci_enable_clk(host, clk);


@ -2355,7 +2355,7 @@ static int __bond_release_one(struct net_device *bond_dev,
RCU_INIT_POINTER(bond->current_arp_slave, NULL);
if (!all && (!bond->params.fail_over_mac ||
if (!all && (bond->params.fail_over_mac != BOND_FOM_ACTIVE ||
BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP)) {
if (ether_addr_equal_64bits(bond_dev->dev_addr, slave->perm_hwaddr) &&
bond_has_slaves(bond))
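
fail_over_mac is a tri-state option (none, active, follow), so testing it with the ! operator only catches the "none" case; the fix compares against BOND_FOM_ACTIVE explicitly. A compact illustration of the enum-as-boolean pitfall (enum values match the bonding driver; the helper name is made up):

    #include <stdio.h>

    enum fom { BOND_FOM_NONE, BOND_FOM_ACTIVE, BOND_FOM_FOLLOW };

    static int restores_slave_mac(enum fom mode)
    {
        return mode != BOND_FOM_ACTIVE;   /* was: !mode, wrong for FOLLOW */
    }

    int main(void)
    {
        printf("%d %d %d\n",
               restores_slave_mac(BOND_FOM_NONE),     /* 1 */
               restores_slave_mac(BOND_FOM_ACTIVE),   /* 0 */
               restores_slave_mac(BOND_FOM_FOLLOW));  /* 1: the fixed case */
        return 0;
    }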


@ -334,7 +334,7 @@ static int c_can_plat_probe(struct platform_device *pdev)
/* Check if we need custom RAMINIT via syscon. Mostly for TI
* platforms. Only supported with DT boot.
*/
if (np && of_property_read_bool(np, "syscon-raminit")) {
if (np && of_property_present(np, "syscon-raminit")) {
u32 id;
struct c_can_raminit *raminit = &priv->raminit_sys;


@ -9,8 +9,6 @@
#include "main.h"
static const struct acpi_device_id xge_acpi_match[];
static int xge_get_resources(struct xge_pdata *pdata)
{
struct platform_device *pdev;
@ -733,7 +731,7 @@ MODULE_DEVICE_TABLE(acpi, xge_acpi_match);
static struct platform_driver xge_driver = {
.driver = {
.name = "xgene-enet-v2",
.acpi_match_table = ACPI_PTR(xge_acpi_match),
.acpi_match_table = xge_acpi_match,
},
.probe = xge_probe,
.remove = xge_remove,


@ -1264,6 +1264,16 @@ static void enetc_xdp_drop(struct enetc_bdr *rx_ring, int rx_ring_first,
}
}
static void enetc_bulk_flip_buff(struct enetc_bdr *rx_ring, int rx_ring_first,
int rx_ring_last)
{
while (rx_ring_first != rx_ring_last) {
enetc_flip_rx_buff(rx_ring,
&rx_ring->rx_swbd[rx_ring_first]);
enetc_bdr_idx_inc(rx_ring, &rx_ring_first);
}
}
static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring,
struct napi_struct *napi, int work_limit,
struct bpf_prog *prog)
@ -1379,11 +1389,7 @@ static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring,
enetc_xdp_drop(rx_ring, orig_i, i);
rx_ring->stats.xdp_redirect_failures++;
} else {
while (orig_i != i) {
enetc_flip_rx_buff(rx_ring,
&rx_ring->rx_swbd[orig_i]);
enetc_bdr_idx_inc(rx_ring, &orig_i);
}
enetc_bulk_flip_buff(rx_ring, orig_i, i);
xdp_redirect_frm_cnt++;
rx_ring->stats.xdp_redirect++;
}


@ -15,13 +15,17 @@
#define LMT_TBL_OP_WRITE 1
#define LMT_MAP_TABLE_SIZE (128 * 1024)
#define LMT_MAPTBL_ENTRY_SIZE 16
#define LMT_MAX_VFS 256
#define LMT_MAP_ENTRY_ENA BIT_ULL(20)
#define LMT_MAP_ENTRY_LINES GENMASK_ULL(18, 16)
/* Function to perform operations (read/write) on lmtst map table */
static int lmtst_map_table_ops(struct rvu *rvu, u32 index, u64 *val,
int lmt_tbl_op)
{
void __iomem *lmt_map_base;
u64 tbl_base;
u64 tbl_base, cfg;
tbl_base = rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_MAP_BASE);
@ -35,6 +39,13 @@ static int lmtst_map_table_ops(struct rvu *rvu, u32 index, u64 *val,
*val = readq(lmt_map_base + index);
} else {
writeq((*val), (lmt_map_base + index));
cfg = FIELD_PREP(LMT_MAP_ENTRY_ENA, 0x1);
/* 2048 LMTLINES */
cfg |= FIELD_PREP(LMT_MAP_ENTRY_LINES, 0x6);
writeq(cfg, (lmt_map_base + (index + 8)));
/* Flushing the AP interceptor cache to make APR_LMT_MAP_ENTRY_S
* changes effective. Write 1 for flush and read is being used as a
* barrier and sets up a data dependency. Write to 0 after a write
@ -52,7 +63,7 @@ static int lmtst_map_table_ops(struct rvu *rvu, u32 index, u64 *val,
#define LMT_MAP_TBL_W1_OFF 8
static u32 rvu_get_lmtst_tbl_index(struct rvu *rvu, u16 pcifunc)
{
return ((rvu_get_pf(pcifunc) * rvu->hw->total_vfs) +
return ((rvu_get_pf(pcifunc) * LMT_MAX_VFS) +
(pcifunc & RVU_PFVF_FUNC_MASK)) * LMT_MAPTBL_ENTRY_SIZE;
}
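
The index helper now strides the map table by a fixed 256 VF slots per PF rather than by the probed VF count, so entries of different PFs can no longer alias. The arithmetic on its own (assuming the driver's usual 10-bit function mask):

    #include <stdio.h>

    #define LMT_MAX_VFS           256
    #define LMT_MAPTBL_ENTRY_SIZE 16
    #define RVU_PFVF_FUNC_MASK    0x3ff   /* low 10 bits select the VF */

    static unsigned int tbl_index(unsigned int pf, unsigned int func)
    {
        return (pf * LMT_MAX_VFS + (func & RVU_PFVF_FUNC_MASK)) *
               LMT_MAPTBL_ENTRY_SIZE;
    }

    int main(void)
    {
        /* PF1's slots start at a fixed offset, independent of VF count */
        printf("%u\n", tbl_index(1, 0));   /* 4096 */
        printf("%u\n", tbl_index(1, 5));   /* 4176 */
        return 0;
    }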


@ -682,9 +682,9 @@ static struct mlx4_db_pgdir *mlx4_alloc_db_pgdir(struct device *dma_device)
}
static int mlx4_alloc_db_from_pgdir(struct mlx4_db_pgdir *pgdir,
struct mlx4_db *db, int order)
struct mlx4_db *db, unsigned int order)
{
int o;
unsigned int o;
int i;
for (o = order; o <= 1; ++o) {
@ -712,7 +712,7 @@ found:
return 0;
}
int mlx4_db_alloc(struct mlx4_dev *dev, struct mlx4_db *db, int order)
int mlx4_db_alloc(struct mlx4_dev *dev, struct mlx4_db *db, unsigned int order)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_db_pgdir *pgdir;


@ -445,6 +445,8 @@ int mlx4_en_process_tx_cq(struct net_device *dev,
if (unlikely(!priv->port_up))
return 0;
if (unlikely(!napi_budget) && cq->type == TX_XDP)
return 0;
netdev_txq_bql_complete_prefetchw(ring->tx_queue);


@ -58,6 +58,7 @@
#define MLX5E_REP_PARAMS_DEF_LOG_SQ_SIZE \
max(0x7, MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE)
#define MLX5E_REP_PARAMS_DEF_NUM_CHANNELS 1
#define MLX5E_REP_PARAMS_DEF_LOG_RQ_SIZE 0x8
static const char mlx5e_rep_driver_name[] = "mlx5e_rep";
@ -615,6 +616,8 @@ static void mlx5e_build_rep_params(struct net_device *netdev)
/* RQ */
mlx5e_build_rq_params(mdev, params);
if (!mlx5e_is_uplink_rep(priv) && mlx5_core_is_ecpf(mdev))
params->log_rq_mtu_frames = MLX5E_REP_PARAMS_DEF_LOG_RQ_SIZE;
/* CQ moderation params */
params->rx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
@ -642,6 +645,8 @@ static void mlx5e_build_rep_netdev(struct net_device *netdev,
netdev->ethtool_ops = &mlx5e_rep_ethtool_ops;
netdev->watchdog_timeo = 15 * HZ;
if (mlx5_core_is_ecpf(mdev))
netdev->tx_queue_len = 1 << MLX5E_REP_PARAMS_DEF_LOG_SQ_SIZE;
#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
netdev->hw_features |= NETIF_F_HW_TC;


@ -188,6 +188,9 @@ mlx5e_test_loopback_validate(struct sk_buff *skb,
struct udphdr *udph;
struct iphdr *iph;
if (skb_linearize(skb))
goto out;
/* We are only going to peek, no need to clone the SKB */
if (MLX5E_TEST_PKT_SIZE - ETH_HLEN > skb_headlen(skb))
goto out;


@ -160,8 +160,13 @@ static int temp_warn(struct notifier_block *nb, unsigned long type, void *data)
u64 value_msb;
value_lsb = be64_to_cpu(eqe->data.temp_warning.sensor_warning_lsb);
/* bit 1-63 are not supported for NICs,
* hence read only bit 0 (asic) from lsb.
*/
value_lsb &= 0x1;
value_msb = be64_to_cpu(eqe->data.temp_warning.sensor_warning_msb);
if (net_ratelimit())
mlx5_core_warn(events->dev,
"High temperature on sensors with bit set %llx %llx",
value_msb, value_lsb);


@ -737,6 +737,7 @@ static void poll_health(struct timer_list *t)
health->prev = count;
if (health->miss_counter == MAX_MISSES) {
mlx5_core_err(dev, "device's health compromised - reached miss count\n");
health->synd = ioread8(&h->synd);
print_health_info(dev);
queue_work(health->wq, &health->report_work);
}


@ -957,7 +957,7 @@ static u32 mana_gd_write_client_oob(const struct gdma_wqe_request *wqe_req,
header->inline_oob_size_div4 = client_oob_size / sizeof(u32);
if (oob_in_sgl) {
WARN_ON_ONCE(!pad_data || wqe_req->num_sge < 2);
WARN_ON_ONCE(wqe_req->num_sge < 2);
header->client_oob_in_sgl = 1;


@ -5185,6 +5185,7 @@ static int r8169_mdio_register(struct rtl8169_private *tp)
new_bus->priv = tp;
new_bus->parent = &pdev->dev;
new_bus->irq[0] = PHY_MAC_INTERRUPT;
new_bus->phy_mask = GENMASK(31, 1);
snprintf(new_bus->id, MII_BUS_ID_SIZE, "r8169-%x-%x",
pci_domain_nr(pdev->bus), pci_dev_id(pdev));


@ -957,7 +957,7 @@ static int sun8i_dwmac_set_syscon(struct device *dev,
/* of_mdio_parse_addr returns a valid (0 ~ 31) PHY
* address. No need to mask it again.
*/
reg |= 1 << H3_EPHY_ADDR_SHIFT;
reg |= ret << H3_EPHY_ADDR_SHIFT;
} else {
/* For SoCs without internal PHY the PHY selection bit should be
* set to 0 (external PHY).


@ -1418,6 +1418,7 @@ static int cpsw_create_ports(struct cpsw_common *cpsw)
ndev->netdev_ops = &cpsw_netdev_ops;
ndev->ethtool_ops = &cpsw_ethtool_ops;
SET_NETDEV_DEV(ndev, dev);
ndev->dev.of_node = slave_data->slave_node;
if (!napi_ndev) {
/* CPSW Host port CPDMA interface is shared between


@ -1488,8 +1488,7 @@ static u8 mcps_data_request(
command.pdata.data_req.src_addr_mode = src_addr_mode;
command.pdata.data_req.dst.mode = dst_address_mode;
if (dst_address_mode != MAC_MODE_NO_ADDR) {
command.pdata.data_req.dst.pan_id[0] = LS_BYTE(dst_pan_id);
command.pdata.data_req.dst.pan_id[1] = MS_BYTE(dst_pan_id);
put_unaligned_le16(dst_pan_id, command.pdata.data_req.dst.pan_id);
if (dst_address_mode == MAC_MODE_SHORT_ADDR) {
command.pdata.data_req.dst.address[0] = LS_BYTE(
dst_addr->short_address
@ -1838,12 +1837,12 @@ static int ca8210_skb_rx(
}
hdr.source.mode = data_ind[0];
dev_dbg(&priv->spi->dev, "srcAddrMode: %#03x\n", hdr.source.mode);
hdr.source.pan_id = *(u16 *)&data_ind[1];
hdr.source.pan_id = cpu_to_le16(get_unaligned_le16(&data_ind[1]));
dev_dbg(&priv->spi->dev, "srcPanId: %#06x\n", hdr.source.pan_id);
memcpy(&hdr.source.extended_addr, &data_ind[3], 8);
hdr.dest.mode = data_ind[11];
dev_dbg(&priv->spi->dev, "dstAddrMode: %#03x\n", hdr.dest.mode);
hdr.dest.pan_id = *(u16 *)&data_ind[12];
hdr.dest.pan_id = cpu_to_le16(get_unaligned_le16(&data_ind[12]));
dev_dbg(&priv->spi->dev, "dstPanId: %#06x\n", hdr.dest.pan_id);
memcpy(&hdr.dest.extended_addr, &data_ind[14], 8);
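
put_unaligned_le16()/get_unaligned_le16() replace the old *(u16 *) casts, which were neither alignment- nor byte-order-safe on big-endian or strict-alignment hosts. Portable userspace equivalents built on memcpy():

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    static void put_unaligned_le16(uint16_t val, void *p)
    {
        uint8_t b[2] = { val & 0xff, val >> 8 };  /* little-endian bytes */
        memcpy(p, b, 2);                          /* no alignment assumed */
    }

    static uint16_t get_unaligned_le16(const void *p)
    {
        uint8_t b[2];

        memcpy(b, p, 2);
        return (uint16_t)(b[0] | (b[1] << 8));
    }

    int main(void)
    {
        uint8_t frame[5];

        put_unaligned_le16(0xABCD, &frame[1]);    /* odd offset is fine */
        printf("0x%04X\n", (unsigned)get_unaligned_le16(&frame[1]));
        return 0;
    }
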
@ -1970,7 +1969,7 @@ static int ca8210_skb_tx(
status = mcps_data_request(
header.source.mode,
header.dest.mode,
header.dest.pan_id,
le16_to_cpu(header.dest.pan_id),
(union macaddr *)&header.dest.extended_addr,
skb->len - mac_len,
&skb->data[mac_len],


@ -9853,6 +9853,7 @@ static const struct usb_device_id rtl8152_table[] = {
{ USB_DEVICE(VENDOR_ID_NVIDIA, 0x09ff) },
{ USB_DEVICE(VENDOR_ID_TPLINK, 0x0601) },
{ USB_DEVICE(VENDOR_ID_DLINK, 0xb301) },
{ USB_DEVICE(VENDOR_ID_DELL, 0xb097) },
{ USB_DEVICE(VENDOR_ID_ASUS, 0x1976) },
{}
};


@ -334,9 +334,9 @@ static int vxlan_fdb_info(struct sk_buff *skb, struct vxlan_dev *vxlan,
be32_to_cpu(fdb->vni)))
goto nla_put_failure;
ci.ndm_used = jiffies_to_clock_t(now - fdb->used);
ci.ndm_used = jiffies_to_clock_t(now - READ_ONCE(fdb->used));
ci.ndm_confirmed = 0;
ci.ndm_updated = jiffies_to_clock_t(now - fdb->updated);
ci.ndm_updated = jiffies_to_clock_t(now - READ_ONCE(fdb->updated));
ci.ndm_refcnt = 0;
if (nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci))
@ -542,8 +542,8 @@ static struct vxlan_fdb *vxlan_find_mac(struct vxlan_dev *vxlan,
struct vxlan_fdb *f;
f = __vxlan_find_mac(vxlan, mac, vni);
if (f && f->used != jiffies)
f->used = jiffies;
if (f && READ_ONCE(f->used) != jiffies)
WRITE_ONCE(f->used, jiffies);
return f;
}
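
vxlan_find_mac() now touches f->used through READ_ONCE()/WRITE_ONCE(), the kernel's annotations for intentional lockless access; they boil down to volatile accesses the compiler may not tear, fuse or re-read. A small GNU C sketch of the check-then-write pattern:

    #include <stdio.h>

    #define READ_ONCE(x)       (*(const volatile typeof(x) *)&(x))
    #define WRITE_ONCE(x, v)   (*(volatile typeof(x) *)&(x) = (v))

    static unsigned long jiffies = 1000;   /* stand-in for the kernel tick */
    static unsigned long fdb_used;

    static void fdb_touch(void)
    {
        /* check first: avoid dirtying a shared cache line needlessly */
        if (READ_ONCE(fdb_used) != jiffies)
            WRITE_ONCE(fdb_used, jiffies);
    }

    int main(void)
    {
        fdb_touch();
        printf("%lu\n", READ_ONCE(fdb_used));
        return 0;
    }
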
@ -1073,12 +1073,12 @@ static int vxlan_fdb_update_existing(struct vxlan_dev *vxlan,
!(f->flags & NTF_VXLAN_ADDED_BY_USER)) {
if (f->state != state) {
f->state = state;
f->updated = jiffies;
WRITE_ONCE(f->updated, jiffies);
notify = 1;
}
if (f->flags != fdb_flags) {
f->flags = fdb_flags;
f->updated = jiffies;
WRITE_ONCE(f->updated, jiffies);
notify = 1;
}
}
@ -1112,7 +1112,7 @@ static int vxlan_fdb_update_existing(struct vxlan_dev *vxlan,
}
if (ndm_flags & NTF_USE)
f->used = jiffies;
WRITE_ONCE(f->used, jiffies);
if (notify) {
if (rd == NULL)
@ -1525,7 +1525,7 @@ static bool vxlan_snoop(struct net_device *dev,
src_mac, &rdst->remote_ip.sa, &src_ip->sa);
rdst->remote_ip = *src_ip;
f->updated = jiffies;
WRITE_ONCE(f->updated, jiffies);
vxlan_fdb_notify(vxlan, f, rdst, RTM_NEWNEIGH, true, NULL);
} else {
u32 hash_index = fdb_head_index(vxlan, src_mac, vni);
@ -3000,7 +3000,7 @@ static void vxlan_cleanup(struct timer_list *t)
if (f->flags & NTF_EXT_LEARNED)
continue;
timeout = f->used + vxlan->cfg.age_interval * HZ;
timeout = READ_ONCE(f->used) + vxlan->cfg.age_interval * HZ;
if (time_before_eq(timeout, jiffies)) {
netdev_dbg(vxlan->dev,
"garbage collect %pM\n",


@ -639,7 +639,9 @@ static int ath9k_of_init(struct ath_softc *sc)
ah->ah_flags |= AH_NO_EEP_SWAP;
}
of_get_mac_address(np, common->macaddr);
ret = of_get_mac_address(np, common->macaddr);
if (ret == -EPROBE_DEFER)
return ret;
return 0;
}


@ -345,6 +345,7 @@ struct mt76_hw_cap {
#define MT_DRV_RX_DMA_HDR BIT(3)
#define MT_DRV_HW_MGMT_TXQ BIT(4)
#define MT_DRV_AMSDU_OFFLOAD BIT(5)
#define MT_DRV_IGNORE_TXS_FAILED BIT(6)
struct mt76_driver_ops {
u32 drv_flags;


@ -151,7 +151,8 @@ mt76x0e_probe(struct pci_dev *pdev, const struct pci_device_id *id)
static const struct mt76_driver_ops drv_ops = {
.txwi_size = sizeof(struct mt76x02_txwi),
.drv_flags = MT_DRV_TX_ALIGNED4_SKBS |
MT_DRV_SW_RX_AIRTIME,
MT_DRV_SW_RX_AIRTIME |
MT_DRV_IGNORE_TXS_FAILED,
.survey_flags = SURVEY_INFO_TIME_TX,
.update_survey = mt76x02_update_channel,
.tx_prepare_skb = mt76x02_tx_prepare_skb,

Some files were not shown because too many files have changed in this diff.