Merge branch 'v6.1/standard/base' into v6.1/standard/preempt-rt/sdkv5.10/axxia

Bruce Ashfield 2025-06-06 11:25:58 -04:00
commit f089114d08
354 changed files with 4229 additions and 2031 deletions

View File

@ -5792,6 +5792,8 @@
Selecting 'on' will also enable the mitigation
against user space to user space task attacks.
Selecting specific mitigation does not force enable
user mitigations.
Selecting 'off' will disable both the kernel and
the user space protections.

View File

@ -100,4 +100,4 @@ Some helpers are provided in order to set/get modem control lines via GPIO.
.. kernel-doc:: drivers/tty/serial/serial_mctrl_gpio.c
:identifiers: mctrl_gpio_init mctrl_gpio_free mctrl_gpio_to_gpiod
mctrl_gpio_set mctrl_gpio_get mctrl_gpio_enable_ms
mctrl_gpio_disable_ms
mctrl_gpio_disable_ms_sync mctrl_gpio_disable_ms_no_sync

View File

@ -32,12 +32,12 @@ Temperature sensors and fans can be queried and set via the standard
=============================== ======= =======================================
Name Perm Description
=============================== ======= =======================================
fan[1-3]_input RO Fan speed in RPM.
fan[1-3]_label RO Fan label.
fan[1-3]_min RO Minimal Fan speed in RPM
fan[1-3]_max RO Maximal Fan speed in RPM
fan[1-3]_target RO Expected Fan speed in RPM
pwm[1-3] RW Control the fan PWM duty-cycle.
fan[1-4]_input RO Fan speed in RPM.
fan[1-4]_label RO Fan label.
fan[1-4]_min RO Minimal Fan speed in RPM
fan[1-4]_max RO Maximal Fan speed in RPM
fan[1-4]_target RO Expected Fan speed in RPM
pwm[1-4] RW Control the fan PWM duty-cycle.
pwm1_enable WO Enable or disable automatic BIOS fan
control (not supported on all laptops,
see below for details).
@ -93,7 +93,7 @@ Again, when you find new codes, we'd be happy to have your patches!
---------------------------
The driver also exports the fans as thermal cooling devices with
``type`` set to ``dell-smm-fan[1-3]``. This allows for easy fan control
``type`` set to ``dell-smm-fan[1-4]``. This allows for easy fan control
using one of the thermal governors.
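Since this is the standard hwmon sysfs interface, the attributes above can be read like any other hwmon file. A minimal user-space sketch in C (the hwmon0 index is hypothetical and varies per machine):

#include <stdio.h>

int main(void)
{
	/* Hypothetical path: the hwmonN index differs between systems. */
	FILE *f = fopen("/sys/class/hwmon/hwmon0/fan1_input", "r");
	long rpm;

	if (!f) {
		perror("fan1_input");
		return 1;
	}
	if (fscanf(f, "%ld", &rpm) == 1)
		printf("fan1: %ld RPM\n", rpm); /* RO value, per the table above */
	fclose(f);
	return 0;
}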
Module parameters

View File

@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 6
PATCHLEVEL = 1
SUBLEVEL = 140
SUBLEVEL = 141
EXTRAVERSION =
NAME = Curry Ramen
@ -875,6 +875,18 @@ ifdef CONFIG_CC_IS_CLANG
KBUILD_CPPFLAGS += -Qunused-arguments
# The kernel builds with '-std=gnu11' so use of GNU extensions is acceptable.
KBUILD_CFLAGS += -Wno-gnu
# Clang may emit a warning when a const variable, such as the dummy variables
# in typecheck(), or const member of an aggregate type are not initialized,
# which can result in unexpected behavior. However, in many audited cases of
# the "field" variant of the warning, this is intentional because the field is
# never used within a particular call path, the field is within a union with
# other non-const members, or the containing object is not const so the field
# can be modified via memcpy() / memset(). While the variable warning also gets
# disabled with this same switch, there should not be too much coverage lost
# because -Wuninitialized will still flag when an uninitialized const variable
# is used.
KBUILD_CFLAGS += $(call cc-disable-warning, default-const-init-unsafe)
else
# gcc inanely warns about local variables called 'main'
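To make the typecheck() case above concrete, here is a small sketch using the usual macro definition from include/linux/typecheck.h (the example function is hypothetical):

/* Simplified copy of typecheck() from include/linux/typecheck.h */
#define typecheck(type, x) \
({	type __dummy; \
	typeof(x) __dummy2; \
	(void)(&__dummy == &__dummy2); \
	1; \
})

static void example(void)
{
	const unsigned long stamp = 0;

	/*
	 * typeof(stamp) is 'const unsigned long', so __dummy2 is an
	 * uninitialized const variable: Clang's default-const-init-unsafe
	 * warning fires here even though __dummy2 is never read.
	 */
	typecheck(unsigned long, stamp);
}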

View File

@ -139,7 +139,7 @@
reg = <0x54400000 0x00040000>;
clocks = <&tegra_car TEGRA114_CLK_DSIB>,
<&tegra_car TEGRA114_CLK_DSIBLP>,
<&tegra_car TEGRA114_CLK_PLL_D2_OUT0>;
<&tegra_car TEGRA114_CLK_PLL_D_OUT0>;
clock-names = "dsi", "lp", "parent";
resets = <&tegra_car 82>;
reset-names = "dsi";

View File

@ -537,11 +537,12 @@ extern u32 at91_pm_suspend_in_sram_sz;
static int at91_suspend_finish(unsigned long val)
{
unsigned char modified_gray_code[] = {
0x00, 0x01, 0x02, 0x03, 0x06, 0x07, 0x04, 0x05, 0x0c, 0x0d,
0x0e, 0x0f, 0x0a, 0x0b, 0x08, 0x09, 0x18, 0x19, 0x1a, 0x1b,
0x1e, 0x1f, 0x1c, 0x1d, 0x14, 0x15, 0x16, 0x17, 0x12, 0x13,
0x10, 0x11,
/* SYNOPSYS workaround to fix a bug in the calibration logic */
unsigned char modified_fix_code[] = {
0x00, 0x01, 0x01, 0x06, 0x07, 0x0c, 0x06, 0x07, 0x0b, 0x18,
0x0a, 0x0b, 0x0c, 0x0d, 0x0d, 0x0a, 0x13, 0x13, 0x12, 0x13,
0x14, 0x15, 0x15, 0x12, 0x18, 0x19, 0x19, 0x1e, 0x1f, 0x14,
0x1e, 0x1f,
};
unsigned int tmp, index;
int i;
@ -552,25 +553,25 @@ static int at91_suspend_finish(unsigned long val)
* restore the ZQ0SR0 with the value saved here. But the
* calibration is buggy and restoring some values from ZQ0SR0
* is forbidden and risky thus we need to provide processed
* values for these (modified gray code values).
* values for these.
*/
tmp = readl(soc_pm.data.ramc_phy + DDR3PHY_ZQ0SR0);
/* Store pull-down output impedance select. */
index = (tmp >> DDR3PHY_ZQ0SR0_PDO_OFF) & 0x1f;
soc_pm.bu->ddr_phy_calibration[0] = modified_gray_code[index];
soc_pm.bu->ddr_phy_calibration[0] = modified_fix_code[index] << DDR3PHY_ZQ0SR0_PDO_OFF;
/* Store pull-up output impedance select. */
index = (tmp >> DDR3PHY_ZQ0SR0_PUO_OFF) & 0x1f;
soc_pm.bu->ddr_phy_calibration[0] |= modified_gray_code[index];
soc_pm.bu->ddr_phy_calibration[0] |= modified_fix_code[index] << DDR3PHY_ZQ0SR0_PUO_OFF;
/* Store pull-down on-die termination impedance select. */
index = (tmp >> DDR3PHY_ZQ0SR0_PDODT_OFF) & 0x1f;
soc_pm.bu->ddr_phy_calibration[0] |= modified_gray_code[index];
soc_pm.bu->ddr_phy_calibration[0] |= modified_fix_code[index] << DDR3PHY_ZQ0SR0_PDODT_OFF;
/* Store pull-up on-die termination impedance select. */
index = (tmp >> DDR3PHY_ZQ0SRO_PUODT_OFF) & 0x1f;
soc_pm.bu->ddr_phy_calibration[0] |= modified_gray_code[index];
soc_pm.bu->ddr_phy_calibration[0] |= modified_fix_code[index] << DDR3PHY_ZQ0SRO_PUODT_OFF;
/*
* The 1st 8 words of memory might get corrupted in the process

View File

@ -151,28 +151,12 @@
vcc-pg-supply = <&reg_aldo1>;
};
&r_ir {
linux,rc-map-name = "rc-beelink-gs1";
status = "okay";
};
&r_pio {
/*
* FIXME: We can't add that supply for now since it would
* create a circular dependency between pinctrl, the regulator
* and the RSB Bus.
*
* vcc-pl-supply = <&reg_aldo1>;
*/
vcc-pm-supply = <&reg_aldo1>;
};
&r_rsb {
&r_i2c {
status = "okay";
axp805: pmic@745 {
axp805: pmic@36 {
compatible = "x-powers,axp805", "x-powers,axp806";
reg = <0x745>;
reg = <0x36>;
interrupt-parent = <&r_intc>;
interrupts = <GIC_SPI 96 IRQ_TYPE_LEVEL_LOW>;
interrupt-controller;
@ -290,6 +274,22 @@
};
};
&r_ir {
linux,rc-map-name = "rc-beelink-gs1";
status = "okay";
};
&r_pio {
/*
* PL0 and PL1 are used for PMIC I2C
* don't enable the pl-supply else
* it will fail at boot
*
* vcc-pl-supply = <&reg_aldo1>;
*/
vcc-pm-supply = <&reg_aldo1>;
};
&spdif {
pinctrl-names = "default";
pinctrl-0 = <&spdif_tx_pin>;

View File

@ -175,16 +175,12 @@
vcc-pg-supply = <&reg_vcc_wifi_io>;
};
&r_ir {
status = "okay";
};
&r_rsb {
&r_i2c {
status = "okay";
axp805: pmic@745 {
axp805: pmic@36 {
compatible = "x-powers,axp805", "x-powers,axp806";
reg = <0x745>;
reg = <0x36>;
interrupt-parent = <&r_intc>;
interrupts = <GIC_SPI 96 IRQ_TYPE_LEVEL_LOW>;
interrupt-controller;
@ -295,6 +291,10 @@
};
};
&r_ir {
status = "okay";
};
&rtc {
clocks = <&ext_osc32k>;
};

View File

@ -112,20 +112,12 @@
vcc-pg-supply = <&reg_aldo1>;
};
&r_ir {
status = "okay";
};
&r_pio {
vcc-pm-supply = <&reg_bldo3>;
};
&r_rsb {
&r_i2c {
status = "okay";
axp805: pmic@745 {
axp805: pmic@36 {
compatible = "x-powers,axp805", "x-powers,axp806";
reg = <0x745>;
reg = <0x36>;
interrupt-parent = <&r_intc>;
interrupts = <GIC_SPI 96 IRQ_TYPE_LEVEL_LOW>;
interrupt-controller;
@ -240,6 +232,14 @@
};
};
&r_ir {
status = "okay";
};
&r_pio {
vcc-pm-supply = <&reg_bldo3>;
};
&rtc {
clocks = <&ext_osc32k>;
};

View File

@ -1631,7 +1631,7 @@
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
regulator-always-on;
gpio = <&exp1 14 GPIO_ACTIVE_HIGH>;
gpio = <&exp1 9 GPIO_ACTIVE_HIGH>;
enable-active-high;
vin-supply = <&vdd_1v8>;
};

View File

@ -421,7 +421,7 @@
no-map;
};
pil_camera_mem: mmeory@85200000 {
pil_camera_mem: memory@85200000 {
reg = <0x0 0x85200000 0x0 0x500000>;
no-map;
};

View File

@ -132,6 +132,7 @@
#define FUJITSU_CPU_PART_A64FX 0x001
#define HISI_CPU_PART_TSV110 0xD01
#define HISI_CPU_PART_HIP09 0xD02
#define APPLE_CPU_PART_M1_ICESTORM 0x022
#define APPLE_CPU_PART_M1_FIRESTORM 0x023
@ -201,6 +202,7 @@
#define MIDR_NVIDIA_CARMEL MIDR_CPU_MODEL(ARM_CPU_IMP_NVIDIA, NVIDIA_CPU_PART_CARMEL)
#define MIDR_FUJITSU_A64FX MIDR_CPU_MODEL(ARM_CPU_IMP_FUJITSU, FUJITSU_CPU_PART_A64FX)
#define MIDR_HISI_TSV110 MIDR_CPU_MODEL(ARM_CPU_IMP_HISI, HISI_CPU_PART_TSV110)
#define MIDR_HISI_HIP09 MIDR_CPU_MODEL(ARM_CPU_IMP_HISI, HISI_CPU_PART_HIP09)
#define MIDR_APPLE_M1_ICESTORM MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M1_ICESTORM)
#define MIDR_APPLE_M1_FIRESTORM MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M1_FIRESTORM)
#define MIDR_APPLE_M1_ICESTORM_PRO MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M1_ICESTORM_PRO)

View File

@ -677,7 +677,8 @@ static inline unsigned long pmd_page_vaddr(pmd_t pmd)
pr_err("%s:%d: bad pmd %016llx.\n", __FILE__, __LINE__, pmd_val(e))
#define pud_none(pud) (!pud_val(pud))
#define pud_bad(pud) (!pud_table(pud))
#define pud_bad(pud) ((pud_val(pud) & PUD_TYPE_MASK) != \
PUD_TYPE_TABLE)
#define pud_present(pud) pte_present(pud_pte(pud))
#define pud_leaf(pud) (pud_present(pud) && !pud_table(pud))
#define pud_valid(pud) pte_valid(pud_pte(pud))

View File

@ -916,6 +916,7 @@ static u8 spectre_bhb_loop_affected(void)
MIDR_ALL_VERSIONS(MIDR_CORTEX_A77),
MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1),
MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_4XX_GOLD),
MIDR_ALL_VERSIONS(MIDR_HISI_HIP09),
{},
};
static const struct midr_range spectre_bhb_k11_list[] = {

View File

@ -87,4 +87,20 @@ struct dyn_arch_ftrace {
#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* __ASSEMBLY__ */
#endif /* CONFIG_FUNCTION_TRACER */
#ifdef CONFIG_FTRACE_SYSCALLS
#ifndef __ASSEMBLY__
/*
* Some syscall entry functions on mips start with "__sys_" (fork and clone,
* for instance). We should also match the sys_ variant with those.
*/
#define ARCH_HAS_SYSCALL_MATCH_SYM_NAME
static inline bool arch_syscall_match_sym_name(const char *sym,
const char *name)
{
return !strcmp(sym, name) ||
(!strncmp(sym, "__sys_", 6) && !strcmp(sym + 6, name + 4));
}
#endif /* __ASSEMBLY__ */
#endif /* CONFIG_FTRACE_SYSCALLS */
#endif /* _ASM_MIPS_FTRACE_H */
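For illustration, the matching rule can be exercised host-side with a standalone sketch (match_sym_name() mirrors the helper above):

#include <assert.h>
#include <stdbool.h>
#include <string.h>

/* Mirrors arch_syscall_match_sym_name() above. */
static bool match_sym_name(const char *sym, const char *name)
{
	return !strcmp(sym, name) ||
	       (!strncmp(sym, "__sys_", 6) && !strcmp(sym + 6, name + 4));
}

int main(void)
{
	assert(match_sym_name("sys_read", "sys_read"));     /* exact match */
	assert(match_sym_name("__sys_clone", "sys_clone")); /* "__sys_" variant */
	assert(!match_sym_name("__sys_clone", "sys_fork"));
	return 0;
}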

View File

@ -56,10 +56,7 @@ static DEFINE_PER_CPU_ALIGNED(u32*, ready_count);
/* Indicates online CPUs coupled with the current CPU */
static DEFINE_PER_CPU_ALIGNED(cpumask_t, online_coupled);
/*
* Used to synchronize entry to deep idle states. Actually per-core rather
* than per-CPU.
*/
/* Used to synchronize entry to deep idle states */
static DEFINE_PER_CPU_ALIGNED(atomic_t, pm_barrier);
/* Saved CPU state across the CPS_PM_POWER_GATED state */
@ -118,9 +115,10 @@ int cps_pm_enter_state(enum cps_pm_state state)
cps_nc_entry_fn entry;
struct core_boot_config *core_cfg;
struct vpe_boot_config *vpe_cfg;
atomic_t *barrier;
/* Check that there is an entry function for this state */
entry = per_cpu(nc_asm_enter, core)[state];
entry = per_cpu(nc_asm_enter, cpu)[state];
if (!entry)
return -EINVAL;
@ -156,7 +154,7 @@ int cps_pm_enter_state(enum cps_pm_state state)
smp_mb__after_atomic();
/* Create a non-coherent mapping of the core ready_count */
core_ready_count = per_cpu(ready_count, core);
core_ready_count = per_cpu(ready_count, cpu);
nc_addr = kmap_noncoherent(virt_to_page(core_ready_count),
(unsigned long)core_ready_count);
nc_addr += ((unsigned long)core_ready_count & ~PAGE_MASK);
@ -164,7 +162,8 @@ int cps_pm_enter_state(enum cps_pm_state state)
/* Ensure ready_count is zero-initialised before the assembly runs */
WRITE_ONCE(*nc_core_ready_count, 0);
coupled_barrier(&per_cpu(pm_barrier, core), online);
barrier = &per_cpu(pm_barrier, cpumask_first(&cpu_sibling_map[cpu]));
coupled_barrier(barrier, online);
/* Run the generated entry code */
left = entry(online, nc_core_ready_count);
@ -635,12 +634,14 @@ out_err:
static int cps_pm_online_cpu(unsigned int cpu)
{
enum cps_pm_state state;
unsigned core = cpu_core(&cpu_data[cpu]);
unsigned int sibling, core;
void *entry_fn, *core_rc;
enum cps_pm_state state;
core = cpu_core(&cpu_data[cpu]);
for (state = CPS_PM_NC_WAIT; state < CPS_PM_STATE_COUNT; state++) {
if (per_cpu(nc_asm_enter, core)[state])
if (per_cpu(nc_asm_enter, cpu)[state])
continue;
if (!test_bit(state, state_support))
continue;
@ -652,16 +653,19 @@ static int cps_pm_online_cpu(unsigned int cpu)
clear_bit(state, state_support);
}
per_cpu(nc_asm_enter, core)[state] = entry_fn;
for_each_cpu(sibling, &cpu_sibling_map[cpu])
per_cpu(nc_asm_enter, sibling)[state] = entry_fn;
}
if (!per_cpu(ready_count, core)) {
if (!per_cpu(ready_count, cpu)) {
core_rc = kmalloc(sizeof(u32), GFP_KERNEL);
if (!core_rc) {
pr_err("Failed allocate core %u ready_count\n", core);
return -ENOMEM;
}
per_cpu(ready_count, core) = core_rc;
for_each_cpu(sibling, &cpu_sibling_map[cpu])
per_cpu(ready_count, sibling) = core_rc;
}
return 0;

View File

@ -2974,11 +2974,11 @@ static void __init fixup_device_tree_pmac(void)
char type[8];
phandle node;
// Some pmacs are missing #size-cells on escc nodes
// Some pmacs are missing #size-cells on escc or i2s nodes
for (node = 0; prom_next_node(&node); ) {
type[0] = '\0';
prom_getprop(node, "device_type", type, sizeof(type));
if (prom_strcmp(type, "escc"))
if (prom_strcmp(type, "escc") && prom_strcmp(type, "i2s"))
continue;
if (prom_getproplen(node, "#size-cells") != PROM_ERROR)

View File

@ -2229,6 +2229,10 @@ static struct pmu power_pmu = {
#define PERF_SAMPLE_ADDR_TYPE (PERF_SAMPLE_ADDR | \
PERF_SAMPLE_PHYS_ADDR | \
PERF_SAMPLE_DATA_PAGE_SIZE)
#define SIER_TYPE_SHIFT 15
#define SIER_TYPE_MASK (0x7ull << SIER_TYPE_SHIFT)
/*
* A counter has overflowed; update its count and record
* things if requested. Note that interrupts are hard-disabled
@ -2297,6 +2301,22 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
is_kernel_addr(mfspr(SPRN_SIAR)))
record = 0;
/*
* SIER[46-48] presents instruction type of the sampled instruction.
* In ISA v3.0 and before values "0" and "7" are considered reserved.
* In ISA v3.1, value "7" has been used to indicate "larx/stcx".
* Drop the sample if "type" has reserved values for this field with an
* ISA version check.
*/
if (event->attr.sample_type & PERF_SAMPLE_DATA_SRC &&
ppmu->get_mem_data_src) {
val = (regs->dar & SIER_TYPE_MASK) >> SIER_TYPE_SHIFT;
if (val == 0 || (val == 7 && !cpu_has_feature(CPU_FTR_ARCH_31))) {
record = 0;
atomic64_inc(&event->lost_samples);
}
}
/*
* Finally record data if requested.
*/

View File

@ -321,8 +321,10 @@ void isa207_get_mem_data_src(union perf_mem_data_src *dsrc, u32 flags,
sier = mfspr(SPRN_SIER);
val = (sier & ISA207_SIER_TYPE_MASK) >> ISA207_SIER_TYPE_SHIFT;
if (val != 1 && val != 2 && !(val == 7 && cpu_has_feature(CPU_FTR_ARCH_31)))
if (val != 1 && val != 2 && !(val == 7 && cpu_has_feature(CPU_FTR_ARCH_31))) {
dsrc->val = 0;
return;
}
idx = (sier & ISA207_SIER_LDST_MASK) >> ISA207_SIER_LDST_SHIFT;
sub_idx = (sier & ISA207_SIER_DATA_SRC_MASK) >> ISA207_SIER_DATA_SRC_SHIFT;

View File

@ -155,5 +155,6 @@ MRPROPER_FILES += $(HOST_DIR)/include/generated
archclean:
@find . \( -name '*.bb' -o -name '*.bbg' -o -name '*.da' \
-o -name '*.gcov' \) -type f -print | xargs rm -f
$(Q)$(MAKE) -f $(srctree)/Makefile ARCH=$(HEADER_ARCH) clean
export HEADER_ARCH SUBARCH USER_CFLAGS CFLAGS_NO_HARDENING OS DEV_NULL_PATH

View File

@ -68,6 +68,7 @@ void __init mem_init(void)
map_memory(brk_end, __pa(brk_end), uml_reserved - brk_end, 1, 1, 0);
memblock_free((void *)brk_end, uml_reserved - brk_end);
uml_reserved = brk_end;
min_low_pfn = PFN_UP(__pa(uml_reserved));
/* this will put all low memory onto the freelists */
memblock_free_all();

View File

@ -22,6 +22,7 @@
# This script requires:
# bash
# syslinux
# genisoimage
# mtools (for fdimage* and hdimage)
# edk2/OVMF (for hdimage)
#
@ -251,7 +252,9 @@ geniso() {
cp "$isolinux" "$ldlinux" "$tmp_dir"
cp "$FBZIMAGE" "$tmp_dir"/linux
echo default linux "$KCMDLINE" > "$tmp_dir"/isolinux.cfg
cp "${FDINITRDS[@]}" "$tmp_dir"/
if [ ${#FDINITRDS[@]} -gt 0 ]; then
cp "${FDINITRDS[@]}" "$tmp_dir"/
fi
genisoimage -J -r -appid 'LINUX_BOOT' -input-charset=utf-8 \
-quiet -o "$FIMAGE" -b isolinux.bin \
-c boot.cat -no-emul-boot -boot-load-size 4 \

View File

@ -1216,7 +1216,8 @@ static __init int perf_ibs_op_init(void)
if (ibs_caps & IBS_CAPS_OPCNTEXT) {
perf_ibs_op.max_period |= IBS_OP_MAX_CNT_EXT_MASK;
perf_ibs_op.config_mask |= IBS_OP_MAX_CNT_EXT_MASK;
perf_ibs_op.cnt_mask |= IBS_OP_MAX_CNT_EXT_MASK;
perf_ibs_op.cnt_mask |= (IBS_OP_MAX_CNT_EXT_MASK |
IBS_OP_CUR_CNT_EXT_MASK);
}
if (ibs_caps & IBS_CAPS_ZEN4)

View File

@ -59,6 +59,8 @@ int __register_nmi_handler(unsigned int, struct nmiaction *);
void unregister_nmi_handler(unsigned int, const char *);
void set_emergency_nmi_handler(unsigned int type, nmi_handler_t handler);
void stop_nmi(void);
void restart_nmi(void);
void local_touch_nmi(void);

View File

@ -456,6 +456,7 @@ struct pebs_xmm {
*/
#define IBS_OP_CUR_CNT (0xFFF80ULL<<32)
#define IBS_OP_CUR_CNT_RAND (0x0007FULL<<32)
#define IBS_OP_CUR_CNT_EXT_MASK (0x7FULL<<52)
#define IBS_OP_CNT_CTL (1ULL<<19)
#define IBS_OP_VAL (1ULL<<18)
#define IBS_OP_ENABLE (1ULL<<17)

View File

@ -1382,9 +1382,13 @@ static __ro_after_init enum spectre_v2_mitigation_cmd spectre_v2_cmd;
static enum spectre_v2_user_cmd __init
spectre_v2_parse_user_cmdline(void)
{
enum spectre_v2_user_cmd mode;
char arg[20];
int ret, i;
mode = IS_ENABLED(CONFIG_MITIGATION_SPECTRE_V2) ?
SPECTRE_V2_USER_CMD_AUTO : SPECTRE_V2_USER_CMD_NONE;
switch (spectre_v2_cmd) {
case SPECTRE_V2_CMD_NONE:
return SPECTRE_V2_USER_CMD_NONE;
@ -1397,7 +1401,7 @@ spectre_v2_parse_user_cmdline(void)
ret = cmdline_find_option(boot_command_line, "spectre_v2_user",
arg, sizeof(arg));
if (ret < 0)
return SPECTRE_V2_USER_CMD_AUTO;
return mode;
for (i = 0; i < ARRAY_SIZE(v2_user_options); i++) {
if (match_option(arg, ret, v2_user_options[i].option)) {
@ -1407,8 +1411,8 @@ spectre_v2_parse_user_cmdline(void)
}
}
pr_err("Unknown user space protection option (%s). Switching to AUTO select\n", arg);
return SPECTRE_V2_USER_CMD_AUTO;
pr_err("Unknown user space protection option (%s). Switching to default\n", arg);
return mode;
}
static inline bool spectre_v2_in_eibrs_mode(enum spectre_v2_mitigation mode)

View File

@ -38,8 +38,12 @@
#define CREATE_TRACE_POINTS
#include <trace/events/nmi.h>
/*
* An emergency handler can be set in any context including NMI
*/
struct nmi_desc {
raw_spinlock_t lock;
nmi_handler_t emerg_handler;
struct list_head head;
};
@ -121,9 +125,22 @@ static void nmi_check_duration(struct nmiaction *action, u64 duration)
static int nmi_handle(unsigned int type, struct pt_regs *regs)
{
struct nmi_desc *desc = nmi_to_desc(type);
nmi_handler_t ehandler;
struct nmiaction *a;
int handled=0;
/*
* Call the emergency handler, if set
*
* In the case of crash_nmi_callback() emergency handler, it will
* return in the case of the crashing CPU to enable it to complete
* other necessary crashing actions ASAP. Other handlers in the
* linked list won't need to be run.
*/
ehandler = desc->emerg_handler;
if (ehandler)
return ehandler(type, regs);
rcu_read_lock();
/*
@ -213,6 +230,31 @@ void unregister_nmi_handler(unsigned int type, const char *name)
}
EXPORT_SYMBOL_GPL(unregister_nmi_handler);
/**
* set_emergency_nmi_handler - Set emergency handler
* @type: NMI type
* @handler: the emergency handler to be stored
*
* Set an emergency NMI handler which, if set, will preempt all the other
* handlers in the linked list. If a NULL handler is passed in, it will clear
* it. It is expected that concurrent calls to this function will not happen
* or the system is screwed beyond repair.
*/
void set_emergency_nmi_handler(unsigned int type, nmi_handler_t handler)
{
struct nmi_desc *desc = nmi_to_desc(type);
if (WARN_ON_ONCE(desc->emerg_handler == handler))
return;
desc->emerg_handler = handler;
/*
* Ensure the emergency handler is visible to other CPUs before
* function return
*/
smp_wmb();
}
static void
pci_serr_error(unsigned char reason, struct pt_regs *regs)
{

View File

@ -896,15 +896,11 @@ void nmi_shootdown_cpus(nmi_shootdown_cb callback)
shootdown_callback = callback;
atomic_set(&waiting_for_crash_ipi, num_online_cpus() - 1);
/* Would it be better to replace the trap vector here? */
if (register_nmi_handler(NMI_LOCAL, crash_nmi_callback,
NMI_FLAG_FIRST, "crash"))
return; /* Return what? */
/*
* Ensure the new callback function is set before sending
* out the NMI
* Set emergency handler to preempt other handlers.
*/
wmb();
set_emergency_nmi_handler(NMI_LOCAL, crash_nmi_callback);
apic_send_IPI_allbutself(NMI_VECTOR);

View File

@ -654,8 +654,13 @@ static void __init memory_map_top_down(unsigned long map_start,
*/
addr = memblock_phys_alloc_range(PMD_SIZE, PMD_SIZE, map_start,
map_end);
memblock_phys_free(addr, PMD_SIZE);
real_end = addr + PMD_SIZE;
if (!addr) {
pr_warn("Failed to release memory for alloc_low_pages()");
real_end = max(map_start, ALIGN_DOWN(map_end, PMD_SIZE));
} else {
memblock_phys_free(addr, PMD_SIZE);
real_end = addr + PMD_SIZE;
}
/* step_size need to be small so pgt_buf from BRK could cover it */
step_size = PMD_SIZE;

View File

@ -959,9 +959,18 @@ int add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
ret = __add_pages(nid, start_pfn, nr_pages, params);
WARN_ON_ONCE(ret);
/* update max_pfn, max_low_pfn and high_memory */
update_end_of_memory_vars(start_pfn << PAGE_SHIFT,
nr_pages << PAGE_SHIFT);
/*
* Special case: add_pages() is called by memremap_pages() for adding device
* private pages. Do not bump up max_pfn in the device private path,
* because max_pfn changes affect dma_addressing_limited().
*
* dma_addressing_limited() returning true when max_pfn is the device's
* addressable memory can force device drivers to use bounce buffers
* and impact their performance negatively:
*/
if (!params->pgmap)
/* update max_pfn, max_low_pfn and high_memory */
update_end_of_memory_vars(start_pfn << PAGE_SHIFT, nr_pages << PAGE_SHIFT);
return ret;
}

View File

@ -109,8 +109,14 @@ void __init kernel_randomize_memory(void)
memory_tb = DIV_ROUND_UP(max_pfn << PAGE_SHIFT, 1UL << TB_SHIFT) +
CONFIG_RANDOMIZE_MEMORY_PHYSICAL_PADDING;
/* Adapt physical memory region size based on available memory */
if (memory_tb < kaslr_regions[0].size_tb)
/*
* Adapt physical memory region size based on available memory,
* except when CONFIG_PCI_P2PDMA is enabled. P2PDMA exposes the
* device BAR space assuming the direct map space is large enough
* for creating a ZONE_DEVICE mapping in the direct map corresponding
* to the physical BAR address.
*/
if (!IS_ENABLED(CONFIG_PCI_P2PDMA) && (memory_tb < kaslr_regions[0].size_tb))
kaslr_regions[0].size_tb = memory_tb;
/*

View File

@ -26,7 +26,6 @@ void get_regs_from_mc(struct uml_pt_regs *regs, mcontext_t *mc)
COPY(RIP);
COPY2(EFLAGS, EFL);
COPY2(CS, CSGSFS);
regs->gp[CS / sizeof(unsigned long)] &= 0xffff;
regs->gp[CS / sizeof(unsigned long)] |= 3;
regs->gp[SS / sizeof(unsigned long)] = mc->gregs[REG_CSGSFS] >> 48;
#endif
}

View File

@ -263,10 +263,6 @@ static int hash_accept(struct socket *sock, struct socket *newsock, int flags,
return err;
err = crypto_ahash_import(&ctx2->req, state);
if (err) {
sock_orphan(sk2);
sock_put(sk2);
}
return err;
}

View File

@ -55,7 +55,7 @@ static int __lzorle_compress(const u8 *src, unsigned int slen,
size_t tmp_len = *dlen; /* size_t(ulong) <-> uint on 64 bit */
int err;
err = lzorle1x_1_compress(src, slen, dst, &tmp_len, ctx);
err = lzorle1x_1_compress_safe(src, slen, dst, &tmp_len, ctx);
if (err != LZO_E_OK)
return -EINVAL;

View File

@ -55,7 +55,7 @@ static int __lzo_compress(const u8 *src, unsigned int slen,
size_t tmp_len = *dlen; /* size_t(ulong) <-> uint on 64 bit */
int err;
err = lzo1x_1_compress(src, slen, dst, &tmp_len, ctx);
err = lzo1x_1_compress_safe(src, slen, dst, &tmp_len, ctx);
if (err != LZO_E_OK)
return -EINVAL;

View File

@ -438,7 +438,7 @@ config ACPI_SBS
the modules will be called sbs and sbshc.
config ACPI_HED
tristate "Hardware Error Device"
bool "Hardware Error Device"
help
This driver supports the Hardware Error Device (PNP0C33),
which is used to report some hardware errors notified via

View File

@ -72,7 +72,12 @@ static struct acpi_driver acpi_hed_driver = {
.notify = acpi_hed_notify,
},
};
module_acpi_driver(acpi_hed_driver);
static int __init acpi_hed_driver_init(void)
{
return acpi_bus_register_driver(&acpi_hed_driver);
}
subsys_initcall(acpi_hed_driver_init);
MODULE_AUTHOR("Huang Ying");
MODULE_DESCRIPTION("ACPI Hardware Error Device Driver");

View File

@ -594,18 +594,19 @@ static int charlcd_init(struct charlcd *lcd)
return 0;
}
struct charlcd *charlcd_alloc(void)
struct charlcd *charlcd_alloc(unsigned int drvdata_size)
{
struct charlcd_priv *priv;
struct charlcd *lcd;
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
priv = kzalloc(sizeof(*priv) + drvdata_size, GFP_KERNEL);
if (!priv)
return NULL;
priv->esc_seq.len = -1;
lcd = &priv->lcd;
lcd->drvdata = priv->drvdata;
return lcd;
}
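A sketch of how a caller with private state might use the new argument (struct my_lcd_priv is hypothetical; drivers without private data pass 0, as the hd44780, lcd2s and panel hunks below show):

#include "charlcd.h"

struct my_lcd_priv {
	int backlight_gpio;
};

static struct charlcd *my_lcd_alloc(void)
{
	/* Private data is co-allocated with the core structure... */
	struct charlcd *lcd = charlcd_alloc(sizeof(struct my_lcd_priv));
	struct my_lcd_priv *priv;

	if (!lcd)
		return NULL;

	/* ...and is reachable through lcd->drvdata, set by charlcd_alloc(). */
	priv = lcd->drvdata;
	priv->backlight_gpio = -1;
	return lcd;
}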

View File

@ -49,7 +49,7 @@ struct charlcd {
unsigned long y;
} addr;
void *drvdata;
void *drvdata; /* Set by charlcd_alloc() */
};
/**
@ -93,7 +93,8 @@ struct charlcd_ops {
};
void charlcd_backlight(struct charlcd *lcd, enum charlcd_onoff on);
struct charlcd *charlcd_alloc(void);
struct charlcd *charlcd_alloc(unsigned int drvdata_size);
void charlcd_free(struct charlcd *lcd);
int charlcd_register(struct charlcd *lcd);

View File

@ -226,7 +226,7 @@ static int hd44780_probe(struct platform_device *pdev)
if (!hdc)
return -ENOMEM;
lcd = charlcd_alloc();
lcd = charlcd_alloc(0);
if (!lcd)
goto fail1;

View File

@ -307,7 +307,7 @@ static int lcd2s_i2c_probe(struct i2c_client *i2c)
if (err < 0)
return err;
lcd = charlcd_alloc();
lcd = charlcd_alloc(0);
if (!lcd)
return -ENOMEM;

View File

@ -835,7 +835,7 @@ static void lcd_init(void)
if (!hdc)
return;
charlcd = charlcd_alloc();
charlcd = charlcd_alloc(0);
if (!charlcd) {
kfree(hdc);
return;

View File

@ -8,6 +8,7 @@
#include <linux/err.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/units.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
@ -405,11 +406,151 @@ static const char * const imx8mp_clkout_sels[] = {"audio_pll1_out", "audio_pll2_
static struct clk_hw **hws;
static struct clk_hw_onecell_data *clk_hw_data;
struct imx8mp_clock_constraints {
unsigned int clkid;
u32 maxrate;
};
/*
* Below tables are taken from IMX8MPCEC Rev. 2.1, 07/2023
* Table 13. Maximum frequency of modules.
* Probable typos fixed are marked with a comment.
*/
static const struct imx8mp_clock_constraints imx8mp_clock_common_constraints[] = {
{ IMX8MP_CLK_A53_DIV, 1000 * HZ_PER_MHZ },
{ IMX8MP_CLK_ENET_AXI, 266666667 }, /* Datasheet claims 266MHz */
{ IMX8MP_CLK_NAND_USDHC_BUS, 266666667 }, /* Datasheet claims 266MHz */
{ IMX8MP_CLK_MEDIA_APB, 200 * HZ_PER_MHZ },
{ IMX8MP_CLK_HDMI_APB, 133333333 }, /* Datasheet claims 133MHz */
{ IMX8MP_CLK_ML_AXI, 800 * HZ_PER_MHZ },
{ IMX8MP_CLK_AHB, 133333333 },
{ IMX8MP_CLK_IPG_ROOT, 66666667 },
{ IMX8MP_CLK_AUDIO_AHB, 400 * HZ_PER_MHZ },
{ IMX8MP_CLK_MEDIA_DISP2_PIX, 170 * HZ_PER_MHZ },
{ IMX8MP_CLK_DRAM_ALT, 666666667 },
{ IMX8MP_CLK_DRAM_APB, 200 * HZ_PER_MHZ },
{ IMX8MP_CLK_CAN1, 80 * HZ_PER_MHZ },
{ IMX8MP_CLK_CAN2, 80 * HZ_PER_MHZ },
{ IMX8MP_CLK_PCIE_AUX, 10 * HZ_PER_MHZ },
{ IMX8MP_CLK_I2C5, 66666667 }, /* Datasheet claims 66MHz */
{ IMX8MP_CLK_I2C6, 66666667 }, /* Datasheet claims 66MHz */
{ IMX8MP_CLK_SAI1, 66666667 }, /* Datasheet claims 66MHz */
{ IMX8MP_CLK_SAI2, 66666667 }, /* Datasheet claims 66MHz */
{ IMX8MP_CLK_SAI3, 66666667 }, /* Datasheet claims 66MHz */
{ IMX8MP_CLK_SAI5, 66666667 }, /* Datasheet claims 66MHz */
{ IMX8MP_CLK_SAI6, 66666667 }, /* Datasheet claims 66MHz */
{ IMX8MP_CLK_ENET_QOS, 125 * HZ_PER_MHZ },
{ IMX8MP_CLK_ENET_QOS_TIMER, 200 * HZ_PER_MHZ },
{ IMX8MP_CLK_ENET_REF, 125 * HZ_PER_MHZ },
{ IMX8MP_CLK_ENET_TIMER, 125 * HZ_PER_MHZ },
{ IMX8MP_CLK_ENET_PHY_REF, 125 * HZ_PER_MHZ },
{ IMX8MP_CLK_NAND, 500 * HZ_PER_MHZ },
{ IMX8MP_CLK_QSPI, 400 * HZ_PER_MHZ },
{ IMX8MP_CLK_USDHC1, 400 * HZ_PER_MHZ },
{ IMX8MP_CLK_USDHC2, 400 * HZ_PER_MHZ },
{ IMX8MP_CLK_I2C1, 66666667 }, /* Datasheet claims 66MHz */
{ IMX8MP_CLK_I2C2, 66666667 }, /* Datasheet claims 66MHz */
{ IMX8MP_CLK_I2C3, 66666667 }, /* Datasheet claims 66MHz */
{ IMX8MP_CLK_I2C4, 66666667 }, /* Datasheet claims 66MHz */
{ IMX8MP_CLK_UART1, 80 * HZ_PER_MHZ },
{ IMX8MP_CLK_UART2, 80 * HZ_PER_MHZ },
{ IMX8MP_CLK_UART3, 80 * HZ_PER_MHZ },
{ IMX8MP_CLK_UART4, 80 * HZ_PER_MHZ },
{ IMX8MP_CLK_ECSPI1, 80 * HZ_PER_MHZ },
{ IMX8MP_CLK_ECSPI2, 80 * HZ_PER_MHZ },
{ IMX8MP_CLK_PWM1, 66666667 }, /* Datasheet claims 66MHz */
{ IMX8MP_CLK_PWM2, 66666667 }, /* Datasheet claims 66MHz */
{ IMX8MP_CLK_PWM3, 66666667 }, /* Datasheet claims 66MHz */
{ IMX8MP_CLK_PWM4, 66666667 }, /* Datasheet claims 66MHz */
{ IMX8MP_CLK_GPT1, 100 * HZ_PER_MHZ },
{ IMX8MP_CLK_GPT2, 100 * HZ_PER_MHZ },
{ IMX8MP_CLK_GPT3, 100 * HZ_PER_MHZ },
{ IMX8MP_CLK_GPT4, 100 * HZ_PER_MHZ },
{ IMX8MP_CLK_GPT5, 100 * HZ_PER_MHZ },
{ IMX8MP_CLK_GPT6, 100 * HZ_PER_MHZ },
{ IMX8MP_CLK_WDOG, 66666667 }, /* Datasheet claims 66MHz */
{ IMX8MP_CLK_IPP_DO_CLKO1, 200 * HZ_PER_MHZ },
{ IMX8MP_CLK_IPP_DO_CLKO2, 200 * HZ_PER_MHZ },
{ IMX8MP_CLK_HDMI_REF_266M, 266 * HZ_PER_MHZ },
{ IMX8MP_CLK_USDHC3, 400 * HZ_PER_MHZ },
{ IMX8MP_CLK_MEDIA_MIPI_PHY1_REF, 300 * HZ_PER_MHZ },
{ IMX8MP_CLK_MEDIA_DISP1_PIX, 250 * HZ_PER_MHZ },
{ IMX8MP_CLK_MEDIA_CAM2_PIX, 277 * HZ_PER_MHZ },
{ IMX8MP_CLK_MEDIA_LDB, 595 * HZ_PER_MHZ },
{ IMX8MP_CLK_MEDIA_MIPI_TEST_BYTE, 200 * HZ_PER_MHZ },
{ IMX8MP_CLK_ECSPI3, 80 * HZ_PER_MHZ },
{ IMX8MP_CLK_PDM, 200 * HZ_PER_MHZ },
{ IMX8MP_CLK_SAI7, 66666667 }, /* Datasheet claims 66MHz */
{ IMX8MP_CLK_MAIN_AXI, 400 * HZ_PER_MHZ },
{ /* Sentinel */ }
};
static const struct imx8mp_clock_constraints imx8mp_clock_nominal_constraints[] = {
{ IMX8MP_CLK_M7_CORE, 600 * HZ_PER_MHZ },
{ IMX8MP_CLK_ML_CORE, 800 * HZ_PER_MHZ },
{ IMX8MP_CLK_GPU3D_CORE, 800 * HZ_PER_MHZ },
{ IMX8MP_CLK_GPU3D_SHADER_CORE, 800 * HZ_PER_MHZ },
{ IMX8MP_CLK_GPU2D_CORE, 800 * HZ_PER_MHZ },
{ IMX8MP_CLK_AUDIO_AXI_SRC, 600 * HZ_PER_MHZ },
{ IMX8MP_CLK_HSIO_AXI, 400 * HZ_PER_MHZ },
{ IMX8MP_CLK_MEDIA_ISP, 400 * HZ_PER_MHZ },
{ IMX8MP_CLK_VPU_BUS, 600 * HZ_PER_MHZ },
{ IMX8MP_CLK_MEDIA_AXI, 400 * HZ_PER_MHZ },
{ IMX8MP_CLK_HDMI_AXI, 400 * HZ_PER_MHZ },
{ IMX8MP_CLK_GPU_AXI, 600 * HZ_PER_MHZ },
{ IMX8MP_CLK_GPU_AHB, 300 * HZ_PER_MHZ },
{ IMX8MP_CLK_NOC, 800 * HZ_PER_MHZ },
{ IMX8MP_CLK_NOC_IO, 600 * HZ_PER_MHZ },
{ IMX8MP_CLK_ML_AHB, 300 * HZ_PER_MHZ },
{ IMX8MP_CLK_VPU_G1, 600 * HZ_PER_MHZ },
{ IMX8MP_CLK_VPU_G2, 500 * HZ_PER_MHZ },
{ IMX8MP_CLK_MEDIA_CAM1_PIX, 400 * HZ_PER_MHZ },
{ IMX8MP_CLK_VPU_VC8000E, 400 * HZ_PER_MHZ }, /* Datasheet claims 500MHz */
{ IMX8MP_CLK_DRAM_CORE, 800 * HZ_PER_MHZ },
{ IMX8MP_CLK_GIC, 400 * HZ_PER_MHZ },
{ /* Sentinel */ }
};
static const struct imx8mp_clock_constraints imx8mp_clock_overdrive_constraints[] = {
{ IMX8MP_CLK_M7_CORE, 800 * HZ_PER_MHZ},
{ IMX8MP_CLK_ML_CORE, 1000 * HZ_PER_MHZ },
{ IMX8MP_CLK_GPU3D_CORE, 1000 * HZ_PER_MHZ },
{ IMX8MP_CLK_GPU3D_SHADER_CORE, 1000 * HZ_PER_MHZ },
{ IMX8MP_CLK_GPU2D_CORE, 1000 * HZ_PER_MHZ },
{ IMX8MP_CLK_AUDIO_AXI_SRC, 800 * HZ_PER_MHZ },
{ IMX8MP_CLK_HSIO_AXI, 500 * HZ_PER_MHZ },
{ IMX8MP_CLK_MEDIA_ISP, 500 * HZ_PER_MHZ },
{ IMX8MP_CLK_VPU_BUS, 800 * HZ_PER_MHZ },
{ IMX8MP_CLK_MEDIA_AXI, 500 * HZ_PER_MHZ },
{ IMX8MP_CLK_HDMI_AXI, 500 * HZ_PER_MHZ },
{ IMX8MP_CLK_GPU_AXI, 800 * HZ_PER_MHZ },
{ IMX8MP_CLK_GPU_AHB, 400 * HZ_PER_MHZ },
{ IMX8MP_CLK_NOC, 1000 * HZ_PER_MHZ },
{ IMX8MP_CLK_NOC_IO, 800 * HZ_PER_MHZ },
{ IMX8MP_CLK_ML_AHB, 400 * HZ_PER_MHZ },
{ IMX8MP_CLK_VPU_G1, 800 * HZ_PER_MHZ },
{ IMX8MP_CLK_VPU_G2, 700 * HZ_PER_MHZ },
{ IMX8MP_CLK_MEDIA_CAM1_PIX, 500 * HZ_PER_MHZ },
{ IMX8MP_CLK_VPU_VC8000E, 500 * HZ_PER_MHZ }, /* Datasheet claims 400MHz */
{ IMX8MP_CLK_DRAM_CORE, 1000 * HZ_PER_MHZ },
{ IMX8MP_CLK_GIC, 500 * HZ_PER_MHZ },
{ /* Sentinel */ }
};
static void imx8mp_clocks_apply_constraints(const struct imx8mp_clock_constraints constraints[])
{
const struct imx8mp_clock_constraints *constr;
for (constr = constraints; constr->clkid; constr++)
clk_hw_set_rate_range(hws[constr->clkid], 0, constr->maxrate);
}
static int imx8mp_clocks_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *np;
void __iomem *anatop_base, *ccm_base;
const char *opmode;
int err;
np = of_find_compatible_node(NULL, NULL, "fsl,imx8mp-anatop");
@ -704,6 +845,16 @@ static int imx8mp_clocks_probe(struct platform_device *pdev)
imx_check_clk_hws(hws, IMX8MP_CLK_END);
imx8mp_clocks_apply_constraints(imx8mp_clock_common_constraints);
err = of_property_read_string(np, "fsl,operating-mode", &opmode);
if (!err) {
if (!strcmp(opmode, "nominal"))
imx8mp_clocks_apply_constraints(imx8mp_clock_nominal_constraints);
else if (!strcmp(opmode, "overdrive"))
imx8mp_clocks_apply_constraints(imx8mp_clock_overdrive_constraints);
}
err = of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_hw_data);
if (err < 0) {
dev_err(dev, "failed to register hws for i.MX8MP\n");

View File

@ -411,7 +411,7 @@ static struct clk_rcg2 cam_cc_bps_clk_src = {
.parent_data = cam_cc_parent_data_0,
.num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_ops,
.ops = &clk_rcg2_shared_ops,
},
};
@ -433,7 +433,7 @@ static struct clk_rcg2 cam_cc_camnoc_axi_clk_src = {
.parent_data = cam_cc_parent_data_0,
.num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_ops,
.ops = &clk_rcg2_shared_ops,
},
};
@ -454,7 +454,7 @@ static struct clk_rcg2 cam_cc_cci_0_clk_src = {
.parent_data = cam_cc_parent_data_0,
.num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_ops,
.ops = &clk_rcg2_shared_ops,
},
};
@ -469,7 +469,7 @@ static struct clk_rcg2 cam_cc_cci_1_clk_src = {
.parent_data = cam_cc_parent_data_0,
.num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_ops,
.ops = &clk_rcg2_shared_ops,
},
};
@ -490,7 +490,7 @@ static struct clk_rcg2 cam_cc_cphy_rx_clk_src = {
.parent_data = cam_cc_parent_data_0,
.num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_ops,
.ops = &clk_rcg2_shared_ops,
},
};
@ -511,7 +511,7 @@ static struct clk_rcg2 cam_cc_csi0phytimer_clk_src = {
.parent_data = cam_cc_parent_data_0,
.num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_ops,
.ops = &clk_rcg2_shared_ops,
},
};
@ -526,7 +526,7 @@ static struct clk_rcg2 cam_cc_csi1phytimer_clk_src = {
.parent_data = cam_cc_parent_data_0,
.num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_ops,
.ops = &clk_rcg2_shared_ops,
},
};
@ -556,7 +556,7 @@ static struct clk_rcg2 cam_cc_csi3phytimer_clk_src = {
.parent_data = cam_cc_parent_data_0,
.num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_ops,
.ops = &clk_rcg2_shared_ops,
},
};
@ -571,7 +571,7 @@ static struct clk_rcg2 cam_cc_csi4phytimer_clk_src = {
.parent_data = cam_cc_parent_data_0,
.num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_ops,
.ops = &clk_rcg2_shared_ops,
},
};
@ -586,7 +586,7 @@ static struct clk_rcg2 cam_cc_csi5phytimer_clk_src = {
.parent_data = cam_cc_parent_data_0,
.num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_ops,
.ops = &clk_rcg2_shared_ops,
},
};
@ -611,7 +611,7 @@ static struct clk_rcg2 cam_cc_fast_ahb_clk_src = {
.parent_data = cam_cc_parent_data_0,
.num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_ops,
.ops = &clk_rcg2_shared_ops,
},
};
@ -634,7 +634,7 @@ static struct clk_rcg2 cam_cc_fd_core_clk_src = {
.parent_data = cam_cc_parent_data_0,
.num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_ops,
.ops = &clk_rcg2_shared_ops,
},
};
@ -649,7 +649,7 @@ static struct clk_rcg2 cam_cc_icp_clk_src = {
.parent_data = cam_cc_parent_data_0,
.num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_ops,
.ops = &clk_rcg2_shared_ops,
},
};
@ -673,7 +673,7 @@ static struct clk_rcg2 cam_cc_ife_0_clk_src = {
.parent_data = cam_cc_parent_data_2,
.num_parents = ARRAY_SIZE(cam_cc_parent_data_2),
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_ops,
.ops = &clk_rcg2_shared_ops,
},
};
@ -710,7 +710,7 @@ static struct clk_rcg2 cam_cc_ife_0_csid_clk_src = {
.parent_data = cam_cc_parent_data_0,
.num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_ops,
.ops = &clk_rcg2_shared_ops,
},
};
@ -734,7 +734,7 @@ static struct clk_rcg2 cam_cc_ife_1_clk_src = {
.parent_data = cam_cc_parent_data_3,
.num_parents = ARRAY_SIZE(cam_cc_parent_data_3),
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_ops,
.ops = &clk_rcg2_shared_ops,
},
};
@ -749,7 +749,7 @@ static struct clk_rcg2 cam_cc_ife_1_csid_clk_src = {
.parent_data = cam_cc_parent_data_0,
.num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_ops,
.ops = &clk_rcg2_shared_ops,
},
};
@ -771,7 +771,7 @@ static struct clk_rcg2 cam_cc_ife_lite_clk_src = {
.parent_data = cam_cc_parent_data_0,
.num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_ops,
.ops = &clk_rcg2_shared_ops,
},
};
@ -786,7 +786,7 @@ static struct clk_rcg2 cam_cc_ife_lite_csid_clk_src = {
.parent_data = cam_cc_parent_data_0,
.num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_ops,
.ops = &clk_rcg2_shared_ops,
},
};
@ -810,7 +810,7 @@ static struct clk_rcg2 cam_cc_ipe_0_clk_src = {
.parent_data = cam_cc_parent_data_4,
.num_parents = ARRAY_SIZE(cam_cc_parent_data_4),
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_ops,
.ops = &clk_rcg2_shared_ops,
},
};
@ -825,7 +825,7 @@ static struct clk_rcg2 cam_cc_jpeg_clk_src = {
.parent_data = cam_cc_parent_data_0,
.num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_ops,
.ops = &clk_rcg2_shared_ops,
},
};
@ -847,7 +847,7 @@ static struct clk_rcg2 cam_cc_mclk0_clk_src = {
.parent_data = cam_cc_parent_data_1,
.num_parents = ARRAY_SIZE(cam_cc_parent_data_1),
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_ops,
.ops = &clk_rcg2_shared_ops,
},
};
@ -862,7 +862,7 @@ static struct clk_rcg2 cam_cc_mclk1_clk_src = {
.parent_data = cam_cc_parent_data_1,
.num_parents = ARRAY_SIZE(cam_cc_parent_data_1),
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_ops,
.ops = &clk_rcg2_shared_ops,
},
};
@ -877,7 +877,7 @@ static struct clk_rcg2 cam_cc_mclk2_clk_src = {
.parent_data = cam_cc_parent_data_1,
.num_parents = ARRAY_SIZE(cam_cc_parent_data_1),
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_ops,
.ops = &clk_rcg2_shared_ops,
},
};
@ -892,7 +892,7 @@ static struct clk_rcg2 cam_cc_mclk3_clk_src = {
.parent_data = cam_cc_parent_data_1,
.num_parents = ARRAY_SIZE(cam_cc_parent_data_1),
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_ops,
.ops = &clk_rcg2_shared_ops,
},
};
@ -907,7 +907,7 @@ static struct clk_rcg2 cam_cc_mclk4_clk_src = {
.parent_data = cam_cc_parent_data_1,
.num_parents = ARRAY_SIZE(cam_cc_parent_data_1),
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_ops,
.ops = &clk_rcg2_shared_ops,
},
};
@ -922,7 +922,7 @@ static struct clk_rcg2 cam_cc_mclk5_clk_src = {
.parent_data = cam_cc_parent_data_1,
.num_parents = ARRAY_SIZE(cam_cc_parent_data_1),
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_ops,
.ops = &clk_rcg2_shared_ops,
},
};
@ -993,7 +993,7 @@ static struct clk_rcg2 cam_cc_slow_ahb_clk_src = {
.parent_data = cam_cc_parent_data_0,
.num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_ops,
.ops = &clk_rcg2_shared_ops,
},
};

View File

@ -561,14 +561,19 @@ clk_alpha_pll_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
u32 alpha_width = pll_alpha_width(pll);
regmap_read(pll->clkr.regmap, PLL_L_VAL(pll), &l);
if (regmap_read(pll->clkr.regmap, PLL_L_VAL(pll), &l))
return 0;
if (regmap_read(pll->clkr.regmap, PLL_USER_CTL(pll), &ctl))
return 0;
regmap_read(pll->clkr.regmap, PLL_USER_CTL(pll), &ctl);
if (ctl & PLL_ALPHA_EN) {
regmap_read(pll->clkr.regmap, PLL_ALPHA_VAL(pll), &low);
if (regmap_read(pll->clkr.regmap, PLL_ALPHA_VAL(pll), &low))
return 0;
if (alpha_width > 32) {
regmap_read(pll->clkr.regmap, PLL_ALPHA_VAL_U(pll),
&high);
if (regmap_read(pll->clkr.regmap, PLL_ALPHA_VAL_U(pll),
&high))
return 0;
a = (u64)high << 32 | low;
} else {
a = low & GENMASK(alpha_width - 1, 0);
@ -760,8 +765,11 @@ alpha_pll_huayra_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
u32 l, alpha = 0, ctl, alpha_m, alpha_n;
regmap_read(pll->clkr.regmap, PLL_L_VAL(pll), &l);
regmap_read(pll->clkr.regmap, PLL_USER_CTL(pll), &ctl);
if (regmap_read(pll->clkr.regmap, PLL_L_VAL(pll), &l))
return 0;
if (regmap_read(pll->clkr.regmap, PLL_USER_CTL(pll), &ctl))
return 0;
if (ctl & PLL_ALPHA_EN) {
regmap_read(pll->clkr.regmap, PLL_ALPHA_VAL(pll), &alpha);
@ -955,8 +963,11 @@ clk_trion_pll_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
u32 l, frac, alpha_width = pll_alpha_width(pll);
regmap_read(pll->clkr.regmap, PLL_L_VAL(pll), &l);
regmap_read(pll->clkr.regmap, PLL_ALPHA_VAL(pll), &frac);
if (regmap_read(pll->clkr.regmap, PLL_L_VAL(pll), &l))
return 0;
if (regmap_read(pll->clkr.regmap, PLL_ALPHA_VAL(pll), &frac))
return 0;
return alpha_pll_calc_rate(parent_rate, l, frac, alpha_width);
}
@ -1014,7 +1025,8 @@ clk_alpha_pll_postdiv_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
struct clk_alpha_pll_postdiv *pll = to_clk_alpha_pll_postdiv(hw);
u32 ctl;
regmap_read(pll->clkr.regmap, PLL_USER_CTL(pll), &ctl);
if (regmap_read(pll->clkr.regmap, PLL_USER_CTL(pll), &ctl))
return 0;
ctl >>= PLL_POST_DIV_SHIFT;
ctl &= PLL_POST_DIV_MASK(pll);
@ -1230,8 +1242,11 @@ static unsigned long alpha_pll_fabia_recalc_rate(struct clk_hw *hw,
struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
u32 l, frac, alpha_width = pll_alpha_width(pll);
regmap_read(pll->clkr.regmap, PLL_L_VAL(pll), &l);
regmap_read(pll->clkr.regmap, PLL_FRAC(pll), &frac);
if (regmap_read(pll->clkr.regmap, PLL_L_VAL(pll), &l))
return 0;
if (regmap_read(pll->clkr.regmap, PLL_FRAC(pll), &frac))
return 0;
return alpha_pll_calc_rate(parent_rate, l, frac, alpha_width);
}
@ -1381,7 +1396,8 @@ clk_trion_pll_postdiv_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
struct regmap *regmap = pll->clkr.regmap;
u32 i, div = 1, val;
regmap_read(regmap, PLL_USER_CTL(pll), &val);
if (regmap_read(regmap, PLL_USER_CTL(pll), &val))
return 0;
val >>= pll->post_div_shift;
val &= PLL_POST_DIV_MASK(pll);
@ -2254,9 +2270,12 @@ static unsigned long alpha_pll_lucid_evo_recalc_rate(struct clk_hw *hw,
struct regmap *regmap = pll->clkr.regmap;
u32 l, frac;
regmap_read(regmap, PLL_L_VAL(pll), &l);
if (regmap_read(regmap, PLL_L_VAL(pll), &l))
return 0;
l &= LUCID_EVO_PLL_L_VAL_MASK;
regmap_read(regmap, PLL_ALPHA_VAL(pll), &frac);
if (regmap_read(regmap, PLL_ALPHA_VAL(pll), &frac))
return 0;
return alpha_pll_calc_rate(parent_rate, l, frac, pll_alpha_width(pll));
}
@ -2331,7 +2350,8 @@ static unsigned long clk_rivian_evo_pll_recalc_rate(struct clk_hw *hw,
struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
u32 l;
regmap_read(pll->clkr.regmap, PLL_L_VAL(pll), &l);
if (regmap_read(pll->clkr.regmap, PLL_L_VAL(pll), &l))
return 0;
return parent_rate * l;
}

View File

@ -412,19 +412,23 @@ static const struct clk_parent_data mmc0_mmc1_parents[] = {
{ .hw = &pll_periph0_2x_clk.common.hw },
{ .hw = &pll_audio1_div2_clk.common.hw },
};
static SUNXI_CCU_MP_DATA_WITH_MUX_GATE(mmc0_clk, "mmc0", mmc0_mmc1_parents, 0x830,
0, 4, /* M */
8, 2, /* P */
24, 3, /* mux */
BIT(31), /* gate */
0);
static SUNXI_CCU_MP_DATA_WITH_MUX_GATE_POSTDIV(mmc0_clk, "mmc0",
mmc0_mmc1_parents, 0x830,
0, 4, /* M */
8, 2, /* P */
24, 3, /* mux */
BIT(31), /* gate */
2, /* post-div */
0);
static SUNXI_CCU_MP_DATA_WITH_MUX_GATE(mmc1_clk, "mmc1", mmc0_mmc1_parents, 0x834,
0, 4, /* M */
8, 2, /* P */
24, 3, /* mux */
BIT(31), /* gate */
0);
static SUNXI_CCU_MP_DATA_WITH_MUX_GATE_POSTDIV(mmc1_clk, "mmc1",
mmc0_mmc1_parents, 0x834,
0, 4, /* M */
8, 2, /* P */
24, 3, /* mux */
BIT(31), /* gate */
2, /* post-div */
0);
static const struct clk_parent_data mmc2_parents[] = {
{ .fw_name = "hosc" },
@ -433,12 +437,14 @@ static const struct clk_parent_data mmc2_parents[] = {
{ .hw = &pll_periph0_800M_clk.common.hw },
{ .hw = &pll_audio1_div2_clk.common.hw },
};
static SUNXI_CCU_MP_DATA_WITH_MUX_GATE(mmc2_clk, "mmc2", mmc2_parents, 0x838,
0, 4, /* M */
8, 2, /* P */
24, 3, /* mux */
BIT(31), /* gate */
0);
static SUNXI_CCU_MP_DATA_WITH_MUX_GATE_POSTDIV(mmc2_clk, "mmc2", mmc2_parents,
0x838,
0, 4, /* M */
8, 2, /* P */
24, 3, /* mux */
BIT(31), /* gate */
2, /* post-div */
0);
static SUNXI_CCU_GATE_HWS(bus_mmc0_clk, "bus-mmc0", psi_ahb_hws,
0x84c, BIT(0), 0);

View File

@ -52,6 +52,28 @@ struct ccu_mp {
} \
}
#define SUNXI_CCU_MP_DATA_WITH_MUX_GATE_POSTDIV(_struct, _name, _parents, \
_reg, \
_mshift, _mwidth, \
_pshift, _pwidth, \
_muxshift, _muxwidth, \
_gate, _postdiv, _flags)\
struct ccu_mp _struct = { \
.enable = _gate, \
.m = _SUNXI_CCU_DIV(_mshift, _mwidth), \
.p = _SUNXI_CCU_DIV(_pshift, _pwidth), \
.mux = _SUNXI_CCU_MUX(_muxshift, _muxwidth), \
.fixed_post_div = _postdiv, \
.common = { \
.reg = _reg, \
.features = CCU_FEATURE_FIXED_POSTDIV, \
.hw.init = CLK_HW_INIT_PARENTS_DATA(_name, \
_parents, \
&ccu_mp_ops, \
_flags), \
} \
}
#define SUNXI_CCU_MP_WITH_MUX_GATE(_struct, _name, _parents, _reg, \
_mshift, _mwidth, \
_pshift, _pwidth, \

View File

@ -114,6 +114,9 @@ static void gic_update_frequency(void *data)
static int gic_starting_cpu(unsigned int cpu)
{
/* Ensure the GIC counter is running */
clear_gic_config(GIC_CONFIG_COUNTSTOP);
gic_clockevent_cpu_init(cpu, this_cpu_ptr(&gic_clockevent_device));
return 0;
}
@ -248,9 +251,6 @@ static int __init gic_clocksource_of_init(struct device_node *node)
pr_warn("Unable to register clock notifier\n");
}
/* And finally start the counter */
clear_gic_config(GIC_CONFIG_COUNTSTOP);
/*
* It's safe to use the MIPS GIC timer as a sched clock source only if
* its ticks are stable, which is true on either the platforms with

View File

@ -73,11 +73,18 @@ static int tegra186_cpufreq_init(struct cpufreq_policy *policy)
{
struct tegra186_cpufreq_data *data = cpufreq_get_driver_data();
unsigned int cluster = data->cpus[policy->cpu].bpmp_cluster_id;
u32 cpu;
policy->freq_table = data->clusters[cluster].table;
policy->cpuinfo.transition_latency = 300 * 1000;
policy->driver_data = NULL;
/* set same policy for all cpus in a cluster */
for (cpu = 0; cpu < ARRAY_SIZE(tegra186_cpus); cpu++) {
if (data->cpus[cpu].bpmp_cluster_id == cluster)
cpumask_set_cpu(cpu, policy->cpus);
}
return 0;
}

View File

@ -249,8 +249,19 @@ again:
* This can deal with workloads that have long pauses interspersed
* with sporadic activity with a bunch of short pauses.
*/
if ((divisor * 4) <= INTERVALS * 3)
if (divisor * 4 <= INTERVALS * 3) {
/*
* If there are sufficiently many data points still under
* consideration after the outliers have been eliminated,
* returning without a prediction would be a mistake because it
* is likely that the next interval will not exceed the current
* maximum, so return the latter in that case.
*/
if (divisor >= INTERVALS / 2)
return max;
return UINT_MAX;
}
thresh = max - 1;
goto again;

View File

@ -410,9 +410,10 @@ static int cpt_process_ccode(struct otx2_cptlfs_info *lfs,
break;
}
dev_err(&pdev->dev,
"Request failed with software error code 0x%x\n",
cpt_status->s.uc_compcode);
pr_debug("Request failed with software error code 0x%x: algo = %s driver = %s\n",
cpt_status->s.uc_compcode,
info->req->areq->tfm->__crt_alg->cra_name,
info->req->areq->tfm->__crt_alg->cra_driver_name);
otx2_cpt_dump_sg_list(pdev, info->req);
break;
}

View File

@ -12,7 +12,9 @@
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/iommu.h>
#include <linux/highmem.h>
#include <uapi/linux/idxd.h>
#include <linux/xarray.h>
#include "registers.h"
#include "idxd.h"
@ -35,6 +37,7 @@ struct idxd_user_context {
struct idxd_wq *wq;
struct task_struct *task;
unsigned int pasid;
struct mm_struct *mm;
unsigned int flags;
struct iommu_sva *sva;
};
@ -69,6 +72,19 @@ static inline struct idxd_wq *inode_wq(struct inode *inode)
return idxd_cdev->wq;
}
static void idxd_xa_pasid_remove(struct idxd_user_context *ctx)
{
struct idxd_wq *wq = ctx->wq;
void *ptr;
mutex_lock(&wq->uc_lock);
ptr = xa_cmpxchg(&wq->upasid_xa, ctx->pasid, ctx, NULL, GFP_KERNEL);
if (ptr != (void *)ctx)
dev_warn(&wq->idxd->pdev->dev, "xarray cmpxchg failed for pasid %u\n",
ctx->pasid);
mutex_unlock(&wq->uc_lock);
}
static int idxd_cdev_open(struct inode *inode, struct file *filp)
{
struct idxd_user_context *ctx;
@ -109,20 +125,25 @@ static int idxd_cdev_open(struct inode *inode, struct file *filp)
pasid = iommu_sva_get_pasid(sva);
if (pasid == IOMMU_PASID_INVALID) {
iommu_sva_unbind_device(sva);
rc = -EINVAL;
goto failed;
goto failed_get_pasid;
}
ctx->sva = sva;
ctx->pasid = pasid;
ctx->mm = current->mm;
mutex_lock(&wq->uc_lock);
rc = xa_insert(&wq->upasid_xa, pasid, ctx, GFP_KERNEL);
mutex_unlock(&wq->uc_lock);
if (rc < 0)
dev_warn(dev, "PASID entry already exist in xarray.\n");
if (wq_dedicated(wq)) {
rc = idxd_wq_set_pasid(wq, pasid);
if (rc < 0) {
iommu_sva_unbind_device(sva);
dev_err(dev, "wq set pasid failed: %d\n", rc);
goto failed;
goto failed_set_pasid;
}
}
}
@ -131,7 +152,13 @@ static int idxd_cdev_open(struct inode *inode, struct file *filp)
mutex_unlock(&wq->wq_lock);
return 0;
failed:
failed_set_pasid:
if (device_user_pasid_enabled(idxd))
idxd_xa_pasid_remove(ctx);
failed_get_pasid:
if (device_user_pasid_enabled(idxd))
iommu_sva_unbind_device(sva);
failed:
mutex_unlock(&wq->wq_lock);
kfree(ctx);
return rc;
@ -162,8 +189,10 @@ static int idxd_cdev_release(struct inode *node, struct file *filep)
}
}
if (ctx->sva)
if (ctx->sva) {
iommu_sva_unbind_device(ctx->sva);
idxd_xa_pasid_remove(ctx);
}
kfree(ctx);
mutex_lock(&wq->wq_lock);
idxd_wq_put(wq);
@ -210,6 +239,9 @@ static int idxd_cdev_mmap(struct file *filp, struct vm_area_struct *vma)
if (!idxd->user_submission_safe && !capable(CAP_SYS_RAWIO))
return -EPERM;
if (current->mm != ctx->mm)
return -EPERM;
rc = check_vma(wq, vma, __func__);
if (rc < 0)
return rc;
@ -276,6 +308,9 @@ static ssize_t idxd_cdev_write(struct file *filp, const char __user *buf, size_t
ssize_t written = 0;
int i;
if (current->mm != ctx->mm)
return -EPERM;
for (i = 0; i < len/sizeof(struct dsa_hw_desc); i++) {
int rc = idxd_submit_user_descriptor(ctx, udesc + i);
@ -296,6 +331,9 @@ static __poll_t idxd_cdev_poll(struct file *filp,
struct idxd_device *idxd = wq->idxd;
__poll_t out = 0;
if (current->mm != ctx->mm)
return POLLNVAL;
poll_wait(filp, &wq->err_queue, wait);
spin_lock(&idxd->dev_lock);
if (idxd->sw_err.valid)
@ -408,6 +446,13 @@ static int idxd_user_drv_probe(struct idxd_dev *idxd_dev)
}
mutex_lock(&wq->wq_lock);
wq->wq = create_workqueue(dev_name(wq_confdev(wq)));
if (!wq->wq) {
rc = -ENOMEM;
goto wq_err;
}
wq->type = IDXD_WQT_USER;
rc = drv_enable_wq(wq);
if (rc < 0)
@ -426,7 +471,9 @@ static int idxd_user_drv_probe(struct idxd_dev *idxd_dev)
err_cdev:
drv_disable_wq(wq);
err:
destroy_workqueue(wq->wq);
wq->type = IDXD_WQT_NONE;
wq_err:
mutex_unlock(&wq->wq_lock);
return rc;
}
@ -439,6 +486,8 @@ static void idxd_user_drv_remove(struct idxd_dev *idxd_dev)
idxd_wq_del_cdev(wq);
drv_disable_wq(wq);
wq->type = IDXD_WQT_NONE;
destroy_workqueue(wq->wq);
wq->wq = NULL;
mutex_unlock(&wq->wq_lock);
}
@ -485,3 +534,70 @@ void idxd_cdev_remove(void)
ida_destroy(&ictx[i].minor_ida);
}
}
/**
* idxd_copy_cr - copy completion record to user address space found by wq and
* PASID
* @wq: work queue
* @pasid: PASID
* @addr: user fault address to write
* @cr: completion record
* @len: number of bytes to copy
*
* This is called by a work that handles completion record fault.
*
* Return: number of bytes copied.
*/
int idxd_copy_cr(struct idxd_wq *wq, ioasid_t pasid, unsigned long addr,
void *cr, int len)
{
struct device *dev = &wq->idxd->pdev->dev;
int left = len, status_size = 1;
struct idxd_user_context *ctx;
struct mm_struct *mm;
mutex_lock(&wq->uc_lock);
ctx = xa_load(&wq->upasid_xa, pasid);
if (!ctx) {
dev_warn(dev, "No user context\n");
goto out;
}
mm = ctx->mm;
/*
* The completion record fault handling work is running in kernel
* thread context. It temporarily switches to the mm to copy cr
* to addr in the mm.
*/
kthread_use_mm(mm);
left = copy_to_user((void __user *)addr + status_size, cr + status_size,
len - status_size);
/*
* Copy status only after the rest of completion record is copied
* successfully so that the user gets the complete completion record
* when a non-zero status is polled.
*/
if (!left) {
u8 status;
/*
* Ensure that the completion record's status field is written
* after the rest of the completion record has been written.
* This ensures that the user receives the correct completion
* record information once polling for a non-zero status.
*/
wmb();
status = *(u8 *)cr;
if (put_user(status, (u8 __user *)addr))
left += status_size;
} else {
left += status_size;
}
kthread_unuse_mm(mm);
out:
mutex_unlock(&wq->uc_lock);
return len - left;
}

View File

@ -185,6 +185,7 @@ struct idxd_wq {
struct idxd_dev idxd_dev;
struct idxd_cdev *idxd_cdev;
struct wait_queue_head err_queue;
struct workqueue_struct *wq;
struct idxd_device *idxd;
int id;
struct idxd_irq_entry ie;
@ -214,6 +215,10 @@ struct idxd_wq {
char name[WQ_NAME_SIZE + 1];
u64 max_xfer_bytes;
u32 max_batch_size;
/* Lock to protect upasid_xa access. */
struct mutex uc_lock;
struct xarray upasid_xa;
};
struct idxd_engine {
@ -665,6 +670,8 @@ void idxd_cdev_remove(void);
int idxd_cdev_get_major(struct idxd_device *idxd);
int idxd_wq_add_cdev(struct idxd_wq *wq);
void idxd_wq_del_cdev(struct idxd_wq *wq);
int idxd_copy_cr(struct idxd_wq *wq, ioasid_t pasid, unsigned long addr,
void *buf, int len);
/* perfmon */
#if IS_ENABLED(CONFIG_INTEL_IDXD_PERFMON)

View File

@ -218,6 +218,8 @@ static int idxd_setup_wqs(struct idxd_device *idxd)
}
bitmap_copy(wq->opcap_bmap, idxd->opcap_bmap, IDXD_MAX_OPCAP_BITS);
}
mutex_init(&wq->uc_lock);
xa_init(&wq->upasid_xa);
idxd->wqs[i] = wq;
}

View File

@ -1315,6 +1315,7 @@ static void idxd_conf_wq_release(struct device *dev)
bitmap_free(wq->opcap_bmap);
kfree(wq->wqcfg);
xa_destroy(&wq->upasid_xa);
kfree(wq);
}

View File

@ -405,10 +405,9 @@ static int ie31200_probe1(struct pci_dev *pdev, int dev_idx)
int i, j, ret;
struct mem_ctl_info *mci = NULL;
struct edac_mc_layer layers[2];
struct dimm_data dimm_info[IE31200_CHANNELS][IE31200_DIMMS_PER_CHANNEL];
void __iomem *window;
struct ie31200_priv *priv;
u32 addr_decode, mad_offset;
u32 addr_decode[IE31200_CHANNELS], mad_offset;
/*
* Kaby Lake, Coffee Lake seem to work like Skylake. Please re-visit
@ -466,19 +465,10 @@ static int ie31200_probe1(struct pci_dev *pdev, int dev_idx)
mad_offset = IE31200_MAD_DIMM_0_OFFSET;
}
/* populate DIMM info */
for (i = 0; i < IE31200_CHANNELS; i++) {
addr_decode = readl(window + mad_offset +
addr_decode[i] = readl(window + mad_offset +
(i * 4));
edac_dbg(0, "addr_decode: 0x%x\n", addr_decode);
for (j = 0; j < IE31200_DIMMS_PER_CHANNEL; j++) {
populate_dimm_info(&dimm_info[i][j], addr_decode, j,
skl);
edac_dbg(0, "size: 0x%x, rank: %d, width: %d\n",
dimm_info[i][j].size,
dimm_info[i][j].dual_rank,
dimm_info[i][j].x16_width);
}
edac_dbg(0, "addr_decode: 0x%x\n", addr_decode[i]);
}
/*
@ -489,14 +479,22 @@ static int ie31200_probe1(struct pci_dev *pdev, int dev_idx)
*/
for (i = 0; i < IE31200_DIMMS_PER_CHANNEL; i++) {
for (j = 0; j < IE31200_CHANNELS; j++) {
struct dimm_data dimm_info;
struct dimm_info *dimm;
unsigned long nr_pages;
nr_pages = IE31200_PAGES(dimm_info[j][i].size, skl);
populate_dimm_info(&dimm_info, addr_decode[j], i,
skl);
edac_dbg(0, "size: 0x%x, rank: %d, width: %d\n",
dimm_info.size,
dimm_info.dual_rank,
dimm_info.x16_width);
nr_pages = IE31200_PAGES(dimm_info.size, skl);
if (nr_pages == 0)
continue;
if (dimm_info[j][i].dual_rank) {
if (dimm_info.dual_rank) {
nr_pages = nr_pages / 2;
dimm = edac_get_dimm(mci, (i * 2) + 1, j, 0);
dimm->nr_pages = nr_pages;

View File

@ -191,6 +191,7 @@ struct ffa_device *ffa_device_register(const uuid_t *uuid, int vm_id,
dev = &ffa_dev->dev;
dev->bus = &ffa_bus_type;
dev->release = ffa_release_device;
dev->dma_mask = &dev->coherent_dma_mask;
dev_set_name(&ffa_dev->dev, "arm-ffa-%d", id);
ffa_dev->id = id;

View File

@ -52,7 +52,7 @@
/* V2 Defines */
#define VSE_CVP_TX_CREDITS 0x49 /* 8bit */
#define V2_CREDIT_TIMEOUT_US 20000
#define V2_CREDIT_TIMEOUT_US 40000
#define V2_CHECK_CREDIT_US 10
#define V2_POLL_TIMEOUT_US 1000000
#define V2_USER_TIMEOUT_US 500000

View File

@ -10,8 +10,9 @@
#include <linux/acpi.h>
#include <linux/bitmap.h>
#include <linux/gpio/driver.h>
#include <linux/cleanup.h>
#include <linux/gpio/consumer.h>
#include <linux/gpio/driver.h>
#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/interrupt.h>
@ -20,6 +21,7 @@
#include <linux/platform_data/pca953x.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <asm/unaligned.h>
@ -522,12 +524,10 @@ static int pca953x_gpio_direction_input(struct gpio_chip *gc, unsigned off)
struct pca953x_chip *chip = gpiochip_get_data(gc);
u8 dirreg = chip->recalc_addr(chip, chip->regs->direction, off);
u8 bit = BIT(off % BANK_SZ);
int ret;
mutex_lock(&chip->i2c_lock);
ret = regmap_write_bits(chip->regmap, dirreg, bit, bit);
mutex_unlock(&chip->i2c_lock);
return ret;
guard(mutex)(&chip->i2c_lock);
return regmap_write_bits(chip->regmap, dirreg, bit, bit);
}
static int pca953x_gpio_direction_output(struct gpio_chip *gc,
@ -539,17 +539,15 @@ static int pca953x_gpio_direction_output(struct gpio_chip *gc,
u8 bit = BIT(off % BANK_SZ);
int ret;
mutex_lock(&chip->i2c_lock);
guard(mutex)(&chip->i2c_lock);
/* set output level */
ret = regmap_write_bits(chip->regmap, outreg, bit, val ? bit : 0);
if (ret)
goto exit;
return ret;
/* then direction */
ret = regmap_write_bits(chip->regmap, dirreg, bit, 0);
exit:
mutex_unlock(&chip->i2c_lock);
return ret;
return regmap_write_bits(chip->regmap, dirreg, bit, 0);
}
static int pca953x_gpio_get_value(struct gpio_chip *gc, unsigned off)
@ -560,9 +558,8 @@ static int pca953x_gpio_get_value(struct gpio_chip *gc, unsigned off)
u32 reg_val;
int ret;
mutex_lock(&chip->i2c_lock);
ret = regmap_read(chip->regmap, inreg, &reg_val);
mutex_unlock(&chip->i2c_lock);
scoped_guard(mutex, &chip->i2c_lock)
ret = regmap_read(chip->regmap, inreg, &reg_val);
if (ret < 0)
return ret;
@ -575,9 +572,9 @@ static void pca953x_gpio_set_value(struct gpio_chip *gc, unsigned off, int val)
u8 outreg = chip->recalc_addr(chip, chip->regs->output, off);
u8 bit = BIT(off % BANK_SZ);
mutex_lock(&chip->i2c_lock);
guard(mutex)(&chip->i2c_lock);
regmap_write_bits(chip->regmap, outreg, bit, val ? bit : 0);
mutex_unlock(&chip->i2c_lock);
}
static int pca953x_gpio_get_direction(struct gpio_chip *gc, unsigned off)
@ -588,9 +585,8 @@ static int pca953x_gpio_get_direction(struct gpio_chip *gc, unsigned off)
u32 reg_val;
int ret;
mutex_lock(&chip->i2c_lock);
ret = regmap_read(chip->regmap, dirreg, &reg_val);
mutex_unlock(&chip->i2c_lock);
scoped_guard(mutex, &chip->i2c_lock)
ret = regmap_read(chip->regmap, dirreg, &reg_val);
if (ret < 0)
return ret;
@ -607,9 +603,8 @@ static int pca953x_gpio_get_multiple(struct gpio_chip *gc,
DECLARE_BITMAP(reg_val, MAX_LINE);
int ret;
mutex_lock(&chip->i2c_lock);
ret = pca953x_read_regs(chip, chip->regs->input, reg_val);
mutex_unlock(&chip->i2c_lock);
scoped_guard(mutex, &chip->i2c_lock)
ret = pca953x_read_regs(chip, chip->regs->input, reg_val);
if (ret)
return ret;
@ -624,16 +619,15 @@ static void pca953x_gpio_set_multiple(struct gpio_chip *gc,
DECLARE_BITMAP(reg_val, MAX_LINE);
int ret;
mutex_lock(&chip->i2c_lock);
guard(mutex)(&chip->i2c_lock);
ret = pca953x_read_regs(chip, chip->regs->output, reg_val);
if (ret)
goto exit;
return;
bitmap_replace(reg_val, reg_val, bits, mask, gc->ngpio);
pca953x_write_regs(chip, chip->regs->output, reg_val);
exit:
mutex_unlock(&chip->i2c_lock);
}
static int pca953x_gpio_set_pull_up_down(struct pca953x_chip *chip,
@ -641,7 +635,6 @@ static int pca953x_gpio_set_pull_up_down(struct pca953x_chip *chip,
unsigned long config)
{
enum pin_config_param param = pinconf_to_config_param(config);
u8 pull_en_reg = chip->recalc_addr(chip, PCAL953X_PULL_EN, offset);
u8 pull_sel_reg = chip->recalc_addr(chip, PCAL953X_PULL_SEL, offset);
u8 bit = BIT(offset % BANK_SZ);
@ -654,7 +647,7 @@ static int pca953x_gpio_set_pull_up_down(struct pca953x_chip *chip,
if (!(chip->driver_data & PCA_PCAL))
return -ENOTSUPP;
mutex_lock(&chip->i2c_lock);
guard(mutex)(&chip->i2c_lock);
/* Configure pull-up/pull-down */
if (param == PIN_CONFIG_BIAS_PULL_UP)
@ -664,17 +657,13 @@ static int pca953x_gpio_set_pull_up_down(struct pca953x_chip *chip,
else
ret = 0;
if (ret)
goto exit;
return ret;
/* Disable/Enable pull-up/pull-down */
if (param == PIN_CONFIG_BIAS_DISABLE)
ret = regmap_write_bits(chip->regmap, pull_en_reg, bit, 0);
return regmap_write_bits(chip->regmap, pull_en_reg, bit, 0);
else
ret = regmap_write_bits(chip->regmap, pull_en_reg, bit, bit);
exit:
mutex_unlock(&chip->i2c_lock);
return ret;
return regmap_write_bits(chip->regmap, pull_en_reg, bit, bit);
}
static int pca953x_gpio_set_config(struct gpio_chip *gc, unsigned int offset,
@ -887,10 +876,8 @@ static irqreturn_t pca953x_irq_handler(int irq, void *devid)
bitmap_zero(pending, MAX_LINE);
mutex_lock(&chip->i2c_lock);
ret = pca953x_irq_pending(chip, pending);
mutex_unlock(&chip->i2c_lock);
scoped_guard(mutex, &chip->i2c_lock)
ret = pca953x_irq_pending(chip, pending);
if (ret) {
ret = 0;
@ -1199,9 +1186,9 @@ static void pca953x_remove(struct i2c_client *client)
}
#ifdef CONFIG_PM_SLEEP
static int pca953x_regcache_sync(struct device *dev)
static int pca953x_regcache_sync(struct pca953x_chip *chip)
{
struct pca953x_chip *chip = dev_get_drvdata(dev);
struct device *dev = &chip->client->dev;
int ret;
u8 regaddr;
@ -1248,13 +1235,38 @@ static int pca953x_regcache_sync(struct device *dev)
return 0;
}
static int pca953x_restore_context(struct pca953x_chip *chip)
{
int ret;
guard(mutex)(&chip->i2c_lock);
if (chip->client->irq > 0)
enable_irq(chip->client->irq);
regcache_cache_only(chip->regmap, false);
regcache_mark_dirty(chip->regmap);
ret = pca953x_regcache_sync(chip);
if (ret)
return ret;
return regcache_sync(chip->regmap);
}
static void pca953x_save_context(struct pca953x_chip *chip)
{
guard(mutex)(&chip->i2c_lock);
/* Disable IRQ to prevent early triggering while regmap "cache only" is on */
if (chip->client->irq > 0)
disable_irq(chip->client->irq);
regcache_cache_only(chip->regmap, true);
}
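
All the conversions in this file rely on guard(mutex)() and scoped_guard() from the kernel's <linux/cleanup.h>, which unlock automatically when the enclosing scope ends — that is why the error paths can now simply return. A rough user-space sketch of the underlying mechanism, the compiler's cleanup attribute, with pthreads standing in for the kernel mutex (GUARD and unlock_cleanup are made-up names):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* called automatically when the guarded variable leaves scope */
static void unlock_cleanup(pthread_mutex_t **m)
{
    pthread_mutex_unlock(*m);
}

/* poor man's guard(mutex): lock now, unlock at end of scope */
#define GUARD(m) \
    pthread_mutex_t *guard_var __attribute__((cleanup(unlock_cleanup))) = \
        (pthread_mutex_lock(m), (m))

static int critical_section(int fail)
{
    GUARD(&lock);

    if (fail)
        return -1;  /* early return: the unlock still runs */
    puts("did work under the lock");
    return 0;       /* normal return: unlock runs here too */
}

int main(void)
{
    critical_section(0);
    critical_section(1);
    return 0;
}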
static int pca953x_suspend(struct device *dev)
{
struct pca953x_chip *chip = dev_get_drvdata(dev);
mutex_lock(&chip->i2c_lock);
regcache_cache_only(chip->regmap, true);
mutex_unlock(&chip->i2c_lock);
pca953x_save_context(chip);
if (atomic_read(&chip->wakeup_path))
device_set_wakeup_path(dev);
@ -1277,17 +1289,7 @@ static int pca953x_resume(struct device *dev)
}
}
mutex_lock(&chip->i2c_lock);
regcache_cache_only(chip->regmap, false);
regcache_mark_dirty(chip->regmap);
ret = pca953x_regcache_sync(dev);
if (ret) {
mutex_unlock(&chip->i2c_lock);
return ret;
}
ret = regcache_sync(chip->regmap);
mutex_unlock(&chip->i2c_lock);
ret = pca953x_restore_context(chip);
if (ret) {
dev_err(dev, "Failed to restore register map: %d\n", ret);
return ret;

View File

@ -42,6 +42,29 @@
#include <linux/pci-p2pdma.h>
#include <linux/pm_runtime.h>
static const struct dma_buf_attach_ops amdgpu_dma_buf_attach_ops;
/**
* dma_buf_attach_adev - Helper to get adev of an attachment
*
* @attach: attachment
*
* Returns:
* A struct amdgpu_device * if the attaching device is an amdgpu device or
* partition, NULL otherwise.
*/
static struct amdgpu_device *dma_buf_attach_adev(struct dma_buf_attachment *attach)
{
if (attach->importer_ops == &amdgpu_dma_buf_attach_ops) {
struct drm_gem_object *obj = attach->importer_priv;
struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
return amdgpu_ttm_adev(bo->tbo.bdev);
}
return NULL;
}
/**
* amdgpu_dma_buf_attach - &dma_buf_ops.attach implementation
*
@ -53,12 +76,14 @@
static int amdgpu_dma_buf_attach(struct dma_buf *dmabuf,
struct dma_buf_attachment *attach)
{
struct amdgpu_device *attach_adev = dma_buf_attach_adev(attach);
struct drm_gem_object *obj = dmabuf->priv;
struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
int r;
if (pci_p2pdma_distance(adev->pdev, attach->dev, false) < 0)
if (!amdgpu_dmabuf_is_xgmi_accessible(attach_adev, bo) &&
pci_p2pdma_distance(adev->pdev, attach->dev, false) < 0)
attach->peer2peer = false;
r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
@ -479,6 +504,9 @@ bool amdgpu_dmabuf_is_xgmi_accessible(struct amdgpu_device *adev,
struct drm_gem_object *obj = &bo->tbo.base;
struct drm_gem_object *gobj;
if (!adev)
return false;
if (obj->import_attach) {
struct dma_buf *dma_buf = obj->import_attach->dmabuf;

View File

@ -43,7 +43,7 @@
#include "amdgpu_securedisplay.h"
#include "amdgpu_atomfirmware.h"
#define AMD_VBIOS_FILE_MAX_SIZE_B (1024*1024*3)
#define AMD_VBIOS_FILE_MAX_SIZE_B (1024*1024*16)
static int psp_sysfs_init(struct amdgpu_device *adev);
static void psp_sysfs_fini(struct amdgpu_device *adev);
@ -484,7 +484,6 @@ static int psp_sw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct psp_context *psp = &adev->psp;
struct psp_gfx_cmd_resp *cmd = psp->cmd;
psp_memory_training_fini(psp);
if (psp->sos_fw) {
@ -511,8 +510,8 @@ static int psp_sw_fini(void *handle)
adev->ip_versions[MP0_HWIP][0] == IP_VERSION(11, 0, 7))
psp_sysfs_fini(adev);
kfree(cmd);
cmd = NULL;
kfree(psp->cmd);
psp->cmd = NULL;
psp_free_shared_bufs(psp);

View File

@ -92,12 +92,12 @@ static void gfxhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev)
{
uint64_t value;
/* Program the AGP BAR */
WREG32_SOC15_RLC(GC, 0, mmMC_VM_AGP_BASE, 0);
WREG32_SOC15_RLC(GC, 0, mmMC_VM_AGP_BOT, adev->gmc.agp_start >> 24);
WREG32_SOC15_RLC(GC, 0, mmMC_VM_AGP_TOP, adev->gmc.agp_end >> 24);
if (!amdgpu_sriov_vf(adev) || adev->asic_type <= CHIP_VEGA10) {
/* Program the AGP BAR */
WREG32_SOC15_RLC(GC, 0, mmMC_VM_AGP_BASE, 0);
WREG32_SOC15_RLC(GC, 0, mmMC_VM_AGP_BOT, adev->gmc.agp_start >> 24);
WREG32_SOC15_RLC(GC, 0, mmMC_VM_AGP_TOP, adev->gmc.agp_end >> 24);
/* Program the system aperture low logical page number. */
WREG32_SOC15_RLC(GC, 0, mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18);

View File

@ -813,6 +813,14 @@ struct kfd_process *kfd_create_process(struct file *filep)
if (thread->group_leader->mm != thread->mm)
return ERR_PTR(-EINVAL);
/* If the process just called exec(3), it is possible that the
* cleanup of the kfd_process (following the release of the mm
* of the old process image) is still in the cleanup work queue.
* Make sure to drain any job before trying to recreate any
* resource for this process.
*/
flush_workqueue(kfd_process_wq);
/*
* take kfd processes mutex before starting of process creation
* so there won't be a case where two threads of the same process
@ -825,14 +833,6 @@ struct kfd_process *kfd_create_process(struct file *filep)
if (process) {
pr_debug("Process already found\n");
} else {
/* If the process just called exec(3), it is possible that the
* cleanup of the kfd_process (following the release of the mm
* of the old process image) is still in the cleanup work queue.
* Make sure to drain any job before trying to recreate any
* resource for this process.
*/
flush_workqueue(kfd_process_wq);
process = create_process(thread);
if (IS_ERR(process))
goto out;

View File

@ -2897,11 +2897,6 @@ static int dm_resume(void *handle)
return 0;
}
/* leave display off for S4 sequence */
if (adev->in_s4)
return 0;
/* Recreate dc_state - DC invalidates it when setting power state to S3. */
dc_release_state(dm_state->context);
dm_state->context = dc_create_state(dm->dc);
@ -7281,7 +7276,7 @@ static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
int i;
int result = -EIO;
if (!ddc_service->ddc_pin || !ddc_service->ddc_pin->hw_info.hw_supported)
if (!ddc_service->ddc_pin)
return result;
cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);

View File

@ -116,7 +116,7 @@ static void dcn315_update_clocks(struct clk_mgr *clk_mgr_base,
struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
struct dc_clocks *new_clocks = &context->bw_ctx.bw.dcn.clk;
struct dc *dc = clk_mgr_base->ctx->dc;
int display_count;
int display_count = 0;
bool update_dppclk = false;
bool update_dispclk = false;
bool dpp_clock_lowered = false;
@ -192,15 +192,19 @@ static void dcn315_update_clocks(struct clk_mgr *clk_mgr_base,
update_dppclk = true;
}
if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)) {
/* No need to apply the w/a if we haven't taken over from bios yet */
if (clk_mgr_base->clks.dispclk_khz)
dcn315_disable_otg_wa(clk_mgr_base, context, true);
if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz) &&
(new_clocks->dispclk_khz > 0 || (safe_to_lower && display_count == 0))) {
int requested_dispclk_khz = new_clocks->dispclk_khz;
dcn315_disable_otg_wa(clk_mgr_base, context, true);
/* Clamp the requested clock to the PMFW limit. */
if (dc->debug.min_disp_clk_khz > 0 && requested_dispclk_khz < dc->debug.min_disp_clk_khz)
requested_dispclk_khz = dc->debug.min_disp_clk_khz;
dcn315_smu_set_dispclk(clk_mgr, requested_dispclk_khz);
clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz;
dcn315_smu_set_dispclk(clk_mgr, clk_mgr_base->clks.dispclk_khz);
if (clk_mgr_base->clks.dispclk_khz)
dcn315_disable_otg_wa(clk_mgr_base, context, false);
dcn315_disable_otg_wa(clk_mgr_base, context, false);
update_dispclk = true;
}

View File

@ -153,7 +153,7 @@ static void dcn316_update_clocks(struct clk_mgr *clk_mgr_base,
struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
struct dc_clocks *new_clocks = &context->bw_ctx.bw.dcn.clk;
struct dc *dc = clk_mgr_base->ctx->dc;
int display_count;
int display_count = 0;
bool update_dppclk = false;
bool update_dispclk = false;
bool dpp_clock_lowered = false;
@ -226,11 +226,18 @@ static void dcn316_update_clocks(struct clk_mgr *clk_mgr_base,
update_dppclk = true;
}
if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)) {
if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz) &&
(new_clocks->dispclk_khz > 0 || (safe_to_lower && display_count == 0))) {
int requested_dispclk_khz = new_clocks->dispclk_khz;
dcn316_disable_otg_wa(clk_mgr_base, context, safe_to_lower, true);
/* Clamp the requested clock to the PMFW limit. */
if (dc->debug.min_disp_clk_khz > 0 && requested_dispclk_khz < dc->debug.min_disp_clk_khz)
requested_dispclk_khz = dc->debug.min_disp_clk_khz;
dcn316_smu_set_dispclk(clk_mgr, requested_dispclk_khz);
clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz;
dcn316_smu_set_dispclk(clk_mgr, clk_mgr_base->clks.dispclk_khz);
dcn316_disable_otg_wa(clk_mgr_base, context, safe_to_lower, false);
update_dispclk = true;

View File

@ -267,6 +267,7 @@ static bool create_links(
link->link_id.type = OBJECT_TYPE_CONNECTOR;
link->link_id.id = CONNECTOR_ID_VIRTUAL;
link->link_id.enum_id = ENUM_ID_1;
link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
link->link_enc = kzalloc(sizeof(*link->link_enc), GFP_KERNEL);
if (!link->link_enc) {

View File

@ -392,11 +392,6 @@ bool dpp3_get_optimal_number_of_taps(
int min_taps_y, min_taps_c;
enum lb_memory_config lb_config;
if (scl_data->viewport.width > scl_data->h_active &&
dpp->ctx->dc->debug.max_downscale_src_width != 0 &&
scl_data->viewport.width > dpp->ctx->dc->debug.max_downscale_src_width)
return false;
/*
* Set default taps if none are provided
* From programming guide: taps = min{ ceil(2*H_RATIO,1), 8} for downscaling
@ -434,6 +429,12 @@ bool dpp3_get_optimal_number_of_taps(
else
scl_data->taps.h_taps_c = in_taps->h_taps_c;
// Avoid null data in the scl data with this early return; proceed with non-adaptive calculation first
if (scl_data->viewport.width > scl_data->h_active &&
dpp->ctx->dc->debug.max_downscale_src_width != 0 &&
scl_data->viewport.width > dpp->ctx->dc->debug.max_downscale_src_width)
return false;
/*Ensure we can support the requested number of vtaps*/
min_taps_y = dc_fixpt_ceil(scl_data->ratios.vert);
min_taps_c = dc_fixpt_ceil(scl_data->ratios.vert_c);

View File

@ -1717,7 +1717,7 @@ static int dcn315_populate_dml_pipes_from_context(
pipes[pipe_cnt].dout.dsc_input_bpc = 0;
DC_FP_START();
dcn31_zero_pipe_dcc_fraction(pipes, pipe_cnt);
if (pixel_rate_crb && !pipe->top_pipe && !pipe->prev_odm_pipe) {
if (pixel_rate_crb) {
int bpp = source_format_to_bpp(pipes[pipe_cnt].pipe.src.source_format);
/* Ceil to crb segment size */
int approx_det_segs_required_for_pstate = dcn_get_approx_det_segs_required_for_pstate(
@ -1768,28 +1768,26 @@ static int dcn315_populate_dml_pipes_from_context(
continue;
}
if (!pipe->top_pipe && !pipe->prev_odm_pipe) {
bool split_required = pipe->stream->timing.pix_clk_100hz >= dcn_get_max_non_odm_pix_rate_100hz(&dc->dml.soc)
|| (pipe->plane_state && pipe->plane_state->src_rect.width > 5120);
bool split_required = pipe->stream->timing.pix_clk_100hz >= dcn_get_max_non_odm_pix_rate_100hz(&dc->dml.soc)
|| (pipe->plane_state && pipe->plane_state->src_rect.width > 5120);
if (remaining_det_segs > MIN_RESERVED_DET_SEGS && crb_pipes != 0)
pipes[pipe_cnt].pipe.src.det_size_override += (remaining_det_segs - MIN_RESERVED_DET_SEGS) / crb_pipes +
(crb_idx < (remaining_det_segs - MIN_RESERVED_DET_SEGS) % crb_pipes ? 1 : 0);
if (pipes[pipe_cnt].pipe.src.det_size_override > 2 * DCN3_15_MAX_DET_SEGS) {
/* Clamp to 2 pipe split max det segments */
remaining_det_segs += pipes[pipe_cnt].pipe.src.det_size_override - 2 * (DCN3_15_MAX_DET_SEGS);
pipes[pipe_cnt].pipe.src.det_size_override = 2 * DCN3_15_MAX_DET_SEGS;
}
if (pipes[pipe_cnt].pipe.src.det_size_override > DCN3_15_MAX_DET_SEGS || split_required) {
/* If we are splitting we must have an even number of segments */
remaining_det_segs += pipes[pipe_cnt].pipe.src.det_size_override % 2;
pipes[pipe_cnt].pipe.src.det_size_override -= pipes[pipe_cnt].pipe.src.det_size_override % 2;
}
/* Convert segments into size for DML use */
pipes[pipe_cnt].pipe.src.det_size_override *= DCN3_15_CRB_SEGMENT_SIZE_KB;
crb_idx++;
if (remaining_det_segs > MIN_RESERVED_DET_SEGS && crb_pipes != 0)
pipes[pipe_cnt].pipe.src.det_size_override += (remaining_det_segs - MIN_RESERVED_DET_SEGS) / crb_pipes +
(crb_idx < (remaining_det_segs - MIN_RESERVED_DET_SEGS) % crb_pipes ? 1 : 0);
if (pipes[pipe_cnt].pipe.src.det_size_override > 2 * DCN3_15_MAX_DET_SEGS) {
/* Clamp to 2 pipe split max det segments */
remaining_det_segs += pipes[pipe_cnt].pipe.src.det_size_override - 2 * (DCN3_15_MAX_DET_SEGS);
pipes[pipe_cnt].pipe.src.det_size_override = 2 * DCN3_15_MAX_DET_SEGS;
}
if (pipes[pipe_cnt].pipe.src.det_size_override > DCN3_15_MAX_DET_SEGS || split_required) {
/* If we are splitting we must have an even number of segments */
remaining_det_segs += pipes[pipe_cnt].pipe.src.det_size_override % 2;
pipes[pipe_cnt].pipe.src.det_size_override -= pipes[pipe_cnt].pipe.src.det_size_override % 2;
}
/* Convert segments into size for DML use */
pipes[pipe_cnt].pipe.src.det_size_override *= DCN3_15_CRB_SEGMENT_SIZE_KB;
crb_idx++;
pipe_cnt++;
}
}
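
The segment hand-out above is a fair integer division: every CRB pipe gets the quotient of the spare segments, the first (remainder) pipes get one extra, and the result is clamped to the two-pipe-split maximum. A toy version of the distribution with made-up numbers:

#include <stdio.h>

int main(void)
{
    int remaining = 12, reserved = 2, pipes = 3, max_per_pipe = 4;
    int share = remaining - reserved;   /* 10 segments to hand out */

    for (int i = 0; i < pipes; i++) {
        int extra = share / pipes + (i < share % pipes ? 1 : 0);

        if (extra > max_per_pipe)       /* clamp to the per-pipe cap */
            extra = max_per_pipe;
        printf("pipe %d gets %d extra segments\n", i, extra);
    }
    return 0;
}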

View File

@ -103,7 +103,7 @@ static bool ast_get_vbios_mode_info(const struct drm_format_info *format,
return false;
}
switch (mode->crtc_hdisplay) {
switch (mode->hdisplay) {
case 640:
vbios_mode->enh_table = &res_640x480[refresh_rate_index];
break;
@ -117,7 +117,7 @@ static bool ast_get_vbios_mode_info(const struct drm_format_info *format,
vbios_mode->enh_table = &res_1152x864[refresh_rate_index];
break;
case 1280:
if (mode->crtc_vdisplay == 800)
if (mode->vdisplay == 800)
vbios_mode->enh_table = &res_1280x800[refresh_rate_index];
else
vbios_mode->enh_table = &res_1280x1024[refresh_rate_index];
@ -129,7 +129,7 @@ static bool ast_get_vbios_mode_info(const struct drm_format_info *format,
vbios_mode->enh_table = &res_1440x900[refresh_rate_index];
break;
case 1600:
if (mode->crtc_vdisplay == 900)
if (mode->vdisplay == 900)
vbios_mode->enh_table = &res_1600x900[refresh_rate_index];
else
vbios_mode->enh_table = &res_1600x1200[refresh_rate_index];
@ -138,7 +138,7 @@ static bool ast_get_vbios_mode_info(const struct drm_format_info *format,
vbios_mode->enh_table = &res_1680x1050[refresh_rate_index];
break;
case 1920:
if (mode->crtc_vdisplay == 1080)
if (mode->vdisplay == 1080)
vbios_mode->enh_table = &res_1920x1080[refresh_rate_index];
else
vbios_mode->enh_table = &res_1920x1200[refresh_rate_index];
@ -182,6 +182,7 @@ static bool ast_get_vbios_mode_info(const struct drm_format_info *format,
hborder = (vbios_mode->enh_table->flags & HBorder) ? 8 : 0;
vborder = (vbios_mode->enh_table->flags & VBorder) ? 8 : 0;
adjusted_mode->crtc_hdisplay = vbios_mode->enh_table->hde;
adjusted_mode->crtc_htotal = vbios_mode->enh_table->ht;
adjusted_mode->crtc_hblank_start = vbios_mode->enh_table->hde + hborder;
adjusted_mode->crtc_hblank_end = vbios_mode->enh_table->ht - hborder;
@ -191,6 +192,7 @@ static bool ast_get_vbios_mode_info(const struct drm_format_info *format,
vbios_mode->enh_table->hfp +
vbios_mode->enh_table->hsync);
adjusted_mode->crtc_vdisplay = vbios_mode->enh_table->vde;
adjusted_mode->crtc_vtotal = vbios_mode->enh_table->vt;
adjusted_mode->crtc_vblank_start = vbios_mode->enh_table->vde + vborder;
adjusted_mode->crtc_vblank_end = vbios_mode->enh_table->vt - vborder;

View File

@ -573,6 +573,30 @@ mode_valid(struct drm_atomic_state *state)
return 0;
}
static int drm_atomic_check_valid_clones(struct drm_atomic_state *state,
struct drm_crtc *crtc)
{
struct drm_encoder *drm_enc;
struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
crtc);
drm_for_each_encoder_mask(drm_enc, crtc->dev, crtc_state->encoder_mask) {
if (!drm_enc->possible_clones) {
DRM_DEBUG("enc%d possible_clones is 0\n", drm_enc->base.id);
continue;
}
if ((crtc_state->encoder_mask & drm_enc->possible_clones) !=
crtc_state->encoder_mask) {
DRM_DEBUG("crtc%d failed valid clone check for mask 0x%x\n",
crtc->base.id, crtc_state->encoder_mask);
return -EINVAL;
}
}
return 0;
}
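
The new check is pure bitmask containment: every encoder active on the CRTC must list all of its co-active encoders in possible_clones. A stand-alone sketch of the same test (the encoder count and mask values are invented for illustration):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* possible_clones[i]: encoders that encoder i may share a CRTC with */
static const uint32_t possible_clones[] = {
    /* enc0 */ 0x3,  /* can clone with enc0, enc1 */
    /* enc1 */ 0x3,
    /* enc2 */ 0x4,  /* only itself */
};

static bool valid_clones(uint32_t encoder_mask)
{
    for (int i = 0; i < 3; i++) {
        if (!(encoder_mask & (1u << i)))
            continue;        /* encoder not active on this CRTC */
        if ((encoder_mask & possible_clones[i]) != encoder_mask)
            return false;    /* some co-active encoder not cloneable */
    }
    return true;
}

int main(void)
{
    printf("mask 0x3: %s\n", valid_clones(0x3) ? "ok" : "invalid");
    printf("mask 0x5: %s\n", valid_clones(0x5) ? "ok" : "invalid");
    return 0;
}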
/**
* drm_atomic_helper_check_modeset - validate state object for modeset changes
* @dev: DRM device
@ -744,6 +768,10 @@ drm_atomic_helper_check_modeset(struct drm_device *dev,
ret = drm_atomic_add_affected_planes(state, crtc);
if (ret != 0)
return ret;
ret = drm_atomic_check_valid_clones(state, crtc);
if (ret != 0)
return ret;
}
/*

View File

@ -6164,6 +6164,7 @@ static void drm_reset_display_info(struct drm_connector *connector)
info->has_hdmi_infoframe = false;
info->rgb_quant_range_selectable = false;
memset(&info->hdmi, 0, sizeof(info->hdmi));
memset(&connector->hdr_sink_metadata, 0, sizeof(connector->hdr_sink_metadata));
info->edid_hdmi_rgb444_dc_modes = 0;
info->edid_hdmi_ycbcr444_dc_modes = 0;

View File

@ -406,12 +406,13 @@ static void mtk_dpi_config_swap_input(struct mtk_dpi *dpi, bool enable)
static void mtk_dpi_config_2n_h_fre(struct mtk_dpi *dpi)
{
mtk_dpi_mask(dpi, dpi->conf->reg_h_fre_con, H_FRE_2N, H_FRE_2N);
if (dpi->conf->reg_h_fre_con)
mtk_dpi_mask(dpi, dpi->conf->reg_h_fre_con, H_FRE_2N, H_FRE_2N);
}
static void mtk_dpi_config_disable_edge(struct mtk_dpi *dpi)
{
if (dpi->conf->edge_sel_en)
if (dpi->conf->edge_sel_en && dpi->conf->reg_h_fre_con)
mtk_dpi_mask(dpi, dpi->conf->reg_h_fre_con, 0, EDGE_SEL_EN);
}

View File

@ -1944,6 +1944,7 @@ static const struct edp_panel_entry edp_panels[] = {
EDP_PANEL_ENTRY('S', 'H', 'P', 0x1523, &sharp_lq140m1jw46.delay, "LQ140M1JW46"),
EDP_PANEL_ENTRY('S', 'H', 'P', 0x154c, &delay_200_500_p2e100, "LQ116M1JW10"),
EDP_PANEL_ENTRY('S', 'T', 'A', 0x0004, &delay_200_500_e200, "116KHD024006"),
EDP_PANEL_ENTRY('S', 'T', 'A', 0x0100, &delay_100_500_e200, "2081116HHD028001-51D"),
{ /* sentinel */ }

View File

@ -1289,10 +1289,8 @@ static void vop2_plane_atomic_update(struct drm_plane *plane,
rb_swap = vop2_win_rb_swap(fb->format->format);
vop2_win_write(win, VOP2_WIN_RB_SWAP, rb_swap);
if (!vop2_cluster_window(win)) {
uv_swap = vop2_win_uv_swap(fb->format->format);
vop2_win_write(win, VOP2_WIN_UV_SWAP, uv_swap);
}
uv_swap = vop2_win_uv_swap(fb->format->format);
vop2_win_write(win, VOP2_WIN_UV_SWAP, uv_swap);
if (fb->format->is_yuv) {
vop2_win_write(win, VOP2_WIN_UV_VIR, DIV_ROUND_UP(fb->pitches[1], 4));

View File

@ -41,6 +41,10 @@
#define USB_VENDOR_ID_ACTIONSTAR 0x2101
#define USB_DEVICE_ID_ACTIONSTAR_1011 0x1011
#define USB_VENDOR_ID_ADATA_XPG 0x125f
#define USB_VENDOR_ID_ADATA_XPG_WL_GAMING_MOUSE 0x7505
#define USB_VENDOR_ID_ADATA_XPG_WL_GAMING_MOUSE_DONGLE 0x7506
#define USB_VENDOR_ID_ADS_TECH 0x06e1
#define USB_DEVICE_ID_ADS_TECH_RADIO_SI470X 0xa155

View File

@ -27,6 +27,8 @@
static const struct hid_device_id hid_quirks[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_AASHIMA, USB_DEVICE_ID_AASHIMA_GAMEPAD), HID_QUIRK_BADPAD },
{ HID_USB_DEVICE(USB_VENDOR_ID_AASHIMA, USB_DEVICE_ID_AASHIMA_PREDATOR), HID_QUIRK_BADPAD },
{ HID_USB_DEVICE(USB_VENDOR_ID_ADATA_XPG, USB_VENDOR_ID_ADATA_XPG_WL_GAMING_MOUSE), HID_QUIRK_ALWAYS_POLL },
{ HID_USB_DEVICE(USB_VENDOR_ID_ADATA_XPG, USB_VENDOR_ID_ADATA_XPG_WL_GAMING_MOUSE_DONGLE), HID_QUIRK_ALWAYS_POLL },
{ HID_USB_DEVICE(USB_VENDOR_ID_AFATECH, USB_DEVICE_ID_AFATECH_AF9016), HID_QUIRK_FULLSPEED_INTERVAL },
{ HID_USB_DEVICE(USB_VENDOR_ID_AIREN, USB_DEVICE_ID_AIREN_SLIMPLUS), HID_QUIRK_NOGET },
{ HID_USB_DEVICE(USB_VENDOR_ID_AKAI_09E8, USB_DEVICE_ID_AKAI_09E8_MIDIMIX), HID_QUIRK_NO_INIT_REPORTS },

View File

@ -160,7 +160,7 @@ static int usb_kbd_event(struct input_dev *dev, unsigned int type,
return -1;
spin_lock_irqsave(&kbd->leds_lock, flags);
kbd->newleds = (!!test_bit(LED_KANA, dev->led) << 3) | (!!test_bit(LED_COMPOSE, dev->led) << 3) |
kbd->newleds = (!!test_bit(LED_KANA, dev->led) << 4) | (!!test_bit(LED_COMPOSE, dev->led) << 3) |
(!!test_bit(LED_SCROLLL, dev->led) << 2) | (!!test_bit(LED_CAPSL, dev->led) << 1) |
(!!test_bit(LED_NUML, dev->led));
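
The HID boot-keyboard LED report packs NumLock into bit 0, CapsLock into bit 1, ScrollLock into bit 2, Compose into bit 3 and Kana into bit 4 — hence the fix shifting Kana by 4; the old code shifted both Compose and Kana by 3, so the two flags clobbered each other. A quick sketch of the packing:

#include <stdint.h>
#include <stdio.h>

/* HID boot keyboard LED output report, one bit per LED */
static uint8_t pack_leds(int numl, int capsl, int scrolll,
                         int compose, int kana)
{
    return (uint8_t)((!!kana    << 4) |  /* Kana        */
                     (!!compose << 3) |  /* Compose     */
                     (!!scrolll << 2) |  /* Scroll Lock */
                     (!!capsl   << 1) |  /* Caps Lock   */
                     (!!numl    << 0));  /* Num Lock    */
}

int main(void)
{
    /* Compose and Kana now land in distinct bits: 0x18, not 0x08 */
    printf("0x%02x\n", pack_leds(0, 0, 0, 1, 1));
    return 0;
}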

View File

@ -67,7 +67,7 @@
#define I8K_POWER_BATTERY 0x01
#define DELL_SMM_NO_TEMP 10
#define DELL_SMM_NO_FANS 3
#define DELL_SMM_NO_FANS 4
struct dell_smm_data {
struct mutex i8k_mutex; /* lock for sensors writes */
@ -940,11 +940,14 @@ static const struct hwmon_channel_info *dell_smm_info[] = {
HWMON_F_INPUT | HWMON_F_LABEL | HWMON_F_MIN | HWMON_F_MAX |
HWMON_F_TARGET,
HWMON_F_INPUT | HWMON_F_LABEL | HWMON_F_MIN | HWMON_F_MAX |
HWMON_F_TARGET,
HWMON_F_INPUT | HWMON_F_LABEL | HWMON_F_MIN | HWMON_F_MAX |
HWMON_F_TARGET
),
HWMON_CHANNEL_INFO(pwm,
HWMON_PWM_INPUT | HWMON_PWM_ENABLE,
HWMON_PWM_INPUT,
HWMON_PWM_INPUT,
HWMON_PWM_INPUT
),
NULL

View File

@ -392,7 +392,12 @@ static int gpio_fan_set_cur_state(struct thermal_cooling_device *cdev,
if (state >= fan_data->num_speed)
return -EINVAL;
mutex_lock(&fan_data->lock);
set_fan_speed(fan_data, state);
mutex_unlock(&fan_data->lock);
return 0;
}
@ -488,7 +493,11 @@ MODULE_DEVICE_TABLE(of, of_gpio_fan_match);
static void gpio_fan_stop(void *data)
{
struct gpio_fan_data *fan_data = data;
mutex_lock(&fan_data->lock);
set_fan_speed(data, 0);
mutex_unlock(&fan_data->lock);
}
static int gpio_fan_probe(struct platform_device *pdev)
@ -561,7 +570,9 @@ static int gpio_fan_suspend(struct device *dev)
if (fan_data->gpios) {
fan_data->resume_speed = fan_data->speed_index;
mutex_lock(&fan_data->lock);
set_fan_speed(fan_data, 0);
mutex_unlock(&fan_data->lock);
}
return 0;
@ -571,8 +582,11 @@ static int gpio_fan_resume(struct device *dev)
{
struct gpio_fan_data *fan_data = dev_get_drvdata(dev);
if (fan_data->gpios)
if (fan_data->gpios) {
mutex_lock(&fan_data->lock);
set_fan_speed(fan_data, fan_data->resume_speed);
mutex_unlock(&fan_data->lock);
}
return 0;
}

View File

@ -111,7 +111,7 @@ struct xgene_hwmon_dev {
phys_addr_t comm_base_addr;
void *pcc_comm_addr;
u64 usecs_lat;
unsigned int usecs_lat;
};
/*

View File

@ -1508,7 +1508,10 @@ static int i2c_pxa_probe(struct platform_device *dev)
i2c->adap.name);
}
clk_prepare_enable(i2c->clk);
ret = clk_prepare_enable(i2c->clk);
if (ret)
return dev_err_probe(&dev->dev, ret,
"failed to enable clock\n");
if (i2c->use_pio) {
i2c->adap.algo = &i2c_pxa_pio_algorithm;

View File

@ -14,6 +14,7 @@
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/interconnect.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
@ -150,6 +151,8 @@
/* TAG length for DATA READ in RX FIFO */
#define READ_RX_TAGS_LEN 2
#define QUP_BUS_WIDTH 8
static unsigned int scl_freq;
module_param_named(scl_freq, scl_freq, uint, 0444);
MODULE_PARM_DESC(scl_freq, "SCL frequency override");
@ -227,6 +230,7 @@ struct qup_i2c_dev {
int irq;
struct clk *clk;
struct clk *pclk;
struct icc_path *icc_path;
struct i2c_adapter adap;
int clk_ctl;
@ -255,6 +259,10 @@ struct qup_i2c_dev {
/* To configure when bus is in run state */
u32 config_run;
/* bandwidth votes */
u32 src_clk_freq;
u32 cur_bw_clk_freq;
/* dma parameters */
bool is_dma;
/* To check if the current transfer is using DMA */
@ -453,6 +461,23 @@ static int qup_i2c_bus_active(struct qup_i2c_dev *qup, int len)
return ret;
}
static int qup_i2c_vote_bw(struct qup_i2c_dev *qup, u32 clk_freq)
{
u32 needed_peak_bw;
int ret;
if (qup->cur_bw_clk_freq == clk_freq)
return 0;
needed_peak_bw = Bps_to_icc(clk_freq * QUP_BUS_WIDTH);
ret = icc_set_bw(qup->icc_path, 0, needed_peak_bw);
if (ret)
return ret;
qup->cur_bw_clk_freq = clk_freq;
return 0;
}
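
The vote itself is a rough peak-bandwidth estimate: SCL frequency times the 8-bit bus width, converted into the interconnect framework's kBps unit (Bps_to_icc divides by 1000). A worked sketch of the arithmetic — the helper mirrors the kernel macro, everything else is illustrative:

#include <stdint.h>
#include <stdio.h>

#define QUP_BUS_WIDTH 8

/* mirrors include/linux/interconnect.h: bytes/s -> kBps */
static uint32_t Bps_to_icc(uint64_t bps) { return (uint32_t)(bps / 1000); }

int main(void)
{
    uint32_t scl_freq[] = { 100000, 400000, 1000000 }; /* Hz */

    for (int i = 0; i < 3; i++) {
        uint32_t peak = Bps_to_icc((uint64_t)scl_freq[i] * QUP_BUS_WIDTH);

        /* e.g. 400 kHz SCL -> 3200 kBps peak vote */
        printf("%7u Hz -> icc peak %u kBps\n", scl_freq[i], peak);
    }
    return 0;
}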
static void qup_i2c_write_tx_fifo_v1(struct qup_i2c_dev *qup)
{
struct qup_i2c_block *blk = &qup->blk;
@ -840,6 +865,10 @@ static int qup_i2c_bam_xfer(struct i2c_adapter *adap, struct i2c_msg *msg,
int ret = 0;
int idx = 0;
ret = qup_i2c_vote_bw(qup, qup->src_clk_freq);
if (ret)
return ret;
enable_irq(qup->irq);
ret = qup_i2c_req_dma(qup);
@ -1645,6 +1674,7 @@ static void qup_i2c_disable_clocks(struct qup_i2c_dev *qup)
config = readl(qup->base + QUP_CONFIG);
config |= QUP_CLOCK_AUTO_GATE;
writel(config, qup->base + QUP_CONFIG);
qup_i2c_vote_bw(qup, 0);
clk_disable_unprepare(qup->pclk);
}
@ -1745,6 +1775,11 @@ static int qup_i2c_probe(struct platform_device *pdev)
goto fail_dma;
}
qup->is_dma = true;
qup->icc_path = devm_of_icc_get(&pdev->dev, NULL);
if (IS_ERR(qup->icc_path))
return dev_err_probe(&pdev->dev, PTR_ERR(qup->icc_path),
"failed to get interconnect path\n");
}
nodma:
@ -1793,6 +1828,7 @@ nodma:
qup_i2c_enable_clocks(qup);
src_clk_freq = clk_get_rate(qup->clk);
}
qup->src_clk_freq = src_clk_freq;
/*
* Bootloaders might leave a pending interrupt on certain QUP's,

View File

@ -495,6 +495,8 @@ static void svc_i3c_master_ibi_work(struct work_struct *work)
queue_work(master->base.wq, &master->hj_work);
break;
case SVC_I3C_MSTATUS_IBITYPE_MASTER_REQUEST:
svc_i3c_master_emit_stop(master);
break;
default:
break;
}
@ -832,6 +834,8 @@ static int svc_i3c_master_do_daa_locked(struct svc_i3c_master *master,
u32 reg;
int ret, i;
svc_i3c_master_flush_fifo(master);
while (true) {
/* Enter/proceed with DAA */
writel(SVC_I3C_MCTRL_REQUEST_PROC_DAA |

View File

@ -80,9 +80,12 @@ unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem,
unsigned long pgsz_bitmap,
unsigned long virt)
{
struct scatterlist *sg;
unsigned long curr_len = 0;
dma_addr_t curr_base = ~0;
unsigned long va, pgoff;
struct scatterlist *sg;
dma_addr_t mask;
dma_addr_t end;
int i;
umem->iova = va = virt;
@ -107,17 +110,30 @@ unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem,
pgoff = umem->address & ~PAGE_MASK;
for_each_sgtable_dma_sg(&umem->sgt_append.sgt, sg, i) {
/* Walk SGL and reduce max page size if VA/PA bits differ
* for any address.
/* If the current entry is physically contiguous with the previous
* one, no need to take its start address into consideration.
*/
mask |= (sg_dma_address(sg) + pgoff) ^ va;
if (check_add_overflow(curr_base, curr_len, &end) ||
end != sg_dma_address(sg)) {
curr_base = sg_dma_address(sg);
curr_len = 0;
/* Reduce max page size if VA/PA bits differ */
mask |= (curr_base + pgoff) ^ va;
/* The alignment of any VA matching a discontinuity point
* in the physical memory sets the maximum possible page
* size as this must be a starting point of a new page that
* needs to be aligned.
*/
if (i != 0)
mask |= va;
}
curr_len += sg_dma_len(sg);
va += sg_dma_len(sg) - pgoff;
/* Except for the last entry, the ending iova alignment sets
* the maximum possible page size as the low bits of the iova
* must be zero when starting the next chunk.
*/
if (i != (umem->sgt_append.sgt.nents - 1))
mask |= va;
pgoff = 0;
}
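
The accumulated mask ends up with a 1 in every bit position where some chunk's physical address disagrees with the virtual address, or where a chunk boundary forces alignment, so the mask's lowest set bit bounds the usable page size (the kernel then intersects this bound with the supported pgsz_bitmap). A self-contained sketch of the idea with made-up addresses:

#include <stdint.h>
#include <stdio.h>

struct chunk { uint64_t dma; uint64_t len; };

/* largest page size (power of two) that can map all chunks at 'va' */
static uint64_t best_pgsz(const struct chunk *c, int n, uint64_t va)
{
    uint64_t mask = 0;

    for (int i = 0; i < n; i++) {
        mask |= c[i].dma ^ va;   /* VA and PA bits must agree */
        va += c[i].len;
        if (i != n - 1)
            mask |= va;          /* next chunk must start page-aligned */
    }
    /* the mask's lowest set bit bounds the page size */
    return mask ? (mask & -mask) : (1ull << 63);
}

int main(void)
{
    struct chunk c[] = {
        { 0x10200000, 0x200000 },   /* 2 MiB, 2 MiB aligned */
        { 0x30400000, 0x200000 },
    };

    /* both chunks line up on 2 MiB boundaries -> 2 MiB pages fit */
    printf("best page size: 0x%llx\n",
           (unsigned long long)best_pgsz(c, 2, 0x7f0000000000));
    return 0;
}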

View File

@ -718,8 +718,8 @@ static int ib_uverbs_reg_mr(struct uverbs_attr_bundle *attrs)
goto err_free;
pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd.pd_handle, attrs);
if (!pd) {
ret = -EINVAL;
if (IS_ERR(pd)) {
ret = PTR_ERR(pd);
goto err_free;
}
@ -809,8 +809,8 @@ static int ib_uverbs_rereg_mr(struct uverbs_attr_bundle *attrs)
if (cmd.flags & IB_MR_REREG_PD) {
new_pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd.pd_handle,
attrs);
if (!new_pd) {
ret = -EINVAL;
if (IS_ERR(new_pd)) {
ret = PTR_ERR(new_pd);
goto put_uobjs;
}
} else {
@ -919,8 +919,8 @@ static int ib_uverbs_alloc_mw(struct uverbs_attr_bundle *attrs)
return PTR_ERR(uobj);
pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd.pd_handle, attrs);
if (!pd) {
ret = -EINVAL;
if (IS_ERR(pd)) {
ret = PTR_ERR(pd);
goto err_free;
}
@ -1127,8 +1127,8 @@ static int ib_uverbs_resize_cq(struct uverbs_attr_bundle *attrs)
return ret;
cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd.cq_handle, attrs);
if (!cq)
return -EINVAL;
if (IS_ERR(cq))
return PTR_ERR(cq);
ret = cq->device->ops.resize_cq(cq, cmd.cqe, &attrs->driver_udata);
if (ret)
@ -1189,8 +1189,8 @@ static int ib_uverbs_poll_cq(struct uverbs_attr_bundle *attrs)
return ret;
cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd.cq_handle, attrs);
if (!cq)
return -EINVAL;
if (IS_ERR(cq))
return PTR_ERR(cq);
/* we copy a struct ib_uverbs_poll_cq_resp to user space */
header_ptr = attrs->ucore.outbuf;
@ -1238,8 +1238,8 @@ static int ib_uverbs_req_notify_cq(struct uverbs_attr_bundle *attrs)
return ret;
cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd.cq_handle, attrs);
if (!cq)
return -EINVAL;
if (IS_ERR(cq))
return PTR_ERR(cq);
ib_req_notify_cq(cq, cmd.solicited_only ?
IB_CQ_SOLICITED : IB_CQ_NEXT_COMP);
@ -1321,8 +1321,8 @@ static int create_qp(struct uverbs_attr_bundle *attrs,
ind_tbl = uobj_get_obj_read(rwq_ind_table,
UVERBS_OBJECT_RWQ_IND_TBL,
cmd->rwq_ind_tbl_handle, attrs);
if (!ind_tbl) {
ret = -EINVAL;
if (IS_ERR(ind_tbl)) {
ret = PTR_ERR(ind_tbl);
goto err_put;
}
@ -1360,8 +1360,10 @@ static int create_qp(struct uverbs_attr_bundle *attrs,
if (cmd->is_srq) {
srq = uobj_get_obj_read(srq, UVERBS_OBJECT_SRQ,
cmd->srq_handle, attrs);
if (!srq || srq->srq_type == IB_SRQT_XRC) {
ret = -EINVAL;
if (IS_ERR(srq) ||
srq->srq_type == IB_SRQT_XRC) {
ret = IS_ERR(srq) ? PTR_ERR(srq) :
-EINVAL;
goto err_put;
}
}
@ -1371,23 +1373,29 @@ static int create_qp(struct uverbs_attr_bundle *attrs,
rcq = uobj_get_obj_read(
cq, UVERBS_OBJECT_CQ,
cmd->recv_cq_handle, attrs);
if (!rcq) {
ret = -EINVAL;
if (IS_ERR(rcq)) {
ret = PTR_ERR(rcq);
goto err_put;
}
}
}
}
if (has_sq)
if (has_sq) {
scq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ,
cmd->send_cq_handle, attrs);
if (IS_ERR(scq)) {
ret = PTR_ERR(scq);
goto err_put;
}
}
if (!ind_tbl && cmd->qp_type != IB_QPT_XRC_INI)
rcq = rcq ?: scq;
pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd->pd_handle,
attrs);
if (!pd || (!scq && has_sq)) {
ret = -EINVAL;
if (IS_ERR(pd)) {
ret = PTR_ERR(pd);
goto err_put;
}
@ -1482,18 +1490,18 @@ static int create_qp(struct uverbs_attr_bundle *attrs,
err_put:
if (!IS_ERR(xrcd_uobj))
uobj_put_read(xrcd_uobj);
if (pd)
if (!IS_ERR_OR_NULL(pd))
uobj_put_obj_read(pd);
if (scq)
if (!IS_ERR_OR_NULL(scq))
rdma_lookup_put_uobject(&scq->uobject->uevent.uobject,
UVERBS_LOOKUP_READ);
if (rcq && rcq != scq)
if (!IS_ERR_OR_NULL(rcq) && rcq != scq)
rdma_lookup_put_uobject(&rcq->uobject->uevent.uobject,
UVERBS_LOOKUP_READ);
if (srq)
if (!IS_ERR_OR_NULL(srq))
rdma_lookup_put_uobject(&srq->uobject->uevent.uobject,
UVERBS_LOOKUP_READ);
if (ind_tbl)
if (!IS_ERR_OR_NULL(ind_tbl))
uobj_put_obj_read(ind_tbl);
uobj_alloc_abort(&obj->uevent.uobject, attrs);
@ -1655,8 +1663,8 @@ static int ib_uverbs_query_qp(struct uverbs_attr_bundle *attrs)
}
qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, attrs);
if (!qp) {
ret = -EINVAL;
if (IS_ERR(qp)) {
ret = PTR_ERR(qp);
goto out;
}
@ -1761,8 +1769,8 @@ static int modify_qp(struct uverbs_attr_bundle *attrs,
qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd->base.qp_handle,
attrs);
if (!qp) {
ret = -EINVAL;
if (IS_ERR(qp)) {
ret = PTR_ERR(qp);
goto out;
}
@ -2027,8 +2035,8 @@ static int ib_uverbs_post_send(struct uverbs_attr_bundle *attrs)
return -ENOMEM;
qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, attrs);
if (!qp) {
ret = -EINVAL;
if (IS_ERR(qp)) {
ret = PTR_ERR(qp);
goto out;
}
@ -2065,9 +2073,9 @@ static int ib_uverbs_post_send(struct uverbs_attr_bundle *attrs)
ud->ah = uobj_get_obj_read(ah, UVERBS_OBJECT_AH,
user_wr->wr.ud.ah, attrs);
if (!ud->ah) {
if (IS_ERR(ud->ah)) {
ret = PTR_ERR(ud->ah);
kfree(ud);
ret = -EINVAL;
goto out_put;
}
ud->remote_qpn = user_wr->wr.ud.remote_qpn;
@ -2304,8 +2312,8 @@ static int ib_uverbs_post_recv(struct uverbs_attr_bundle *attrs)
return PTR_ERR(wr);
qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, attrs);
if (!qp) {
ret = -EINVAL;
if (IS_ERR(qp)) {
ret = PTR_ERR(qp);
goto out;
}
@ -2355,8 +2363,8 @@ static int ib_uverbs_post_srq_recv(struct uverbs_attr_bundle *attrs)
return PTR_ERR(wr);
srq = uobj_get_obj_read(srq, UVERBS_OBJECT_SRQ, cmd.srq_handle, attrs);
if (!srq) {
ret = -EINVAL;
if (IS_ERR(srq)) {
ret = PTR_ERR(srq);
goto out;
}
@ -2412,8 +2420,8 @@ static int ib_uverbs_create_ah(struct uverbs_attr_bundle *attrs)
}
pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd.pd_handle, attrs);
if (!pd) {
ret = -EINVAL;
if (IS_ERR(pd)) {
ret = PTR_ERR(pd);
goto err;
}
@ -2482,8 +2490,8 @@ static int ib_uverbs_attach_mcast(struct uverbs_attr_bundle *attrs)
return ret;
qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, attrs);
if (!qp)
return -EINVAL;
if (IS_ERR(qp))
return PTR_ERR(qp);
obj = qp->uobject;
@ -2532,8 +2540,8 @@ static int ib_uverbs_detach_mcast(struct uverbs_attr_bundle *attrs)
return ret;
qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, attrs);
if (!qp)
return -EINVAL;
if (IS_ERR(qp))
return PTR_ERR(qp);
obj = qp->uobject;
mutex_lock(&obj->mcast_lock);
@ -2667,8 +2675,8 @@ static int kern_spec_to_ib_spec_action(struct uverbs_attr_bundle *attrs,
UVERBS_OBJECT_FLOW_ACTION,
kern_spec->action.handle,
attrs);
if (!ib_spec->action.act)
return -EINVAL;
if (IS_ERR(ib_spec->action.act))
return PTR_ERR(ib_spec->action.act);
ib_spec->action.size =
sizeof(struct ib_flow_spec_action_handle);
flow_resources_add(uflow_res,
@ -2685,8 +2693,8 @@ static int kern_spec_to_ib_spec_action(struct uverbs_attr_bundle *attrs,
UVERBS_OBJECT_COUNTERS,
kern_spec->flow_count.handle,
attrs);
if (!ib_spec->flow_count.counters)
return -EINVAL;
if (IS_ERR(ib_spec->flow_count.counters))
return PTR_ERR(ib_spec->flow_count.counters);
ib_spec->flow_count.size =
sizeof(struct ib_flow_spec_action_count);
flow_resources_add(uflow_res,
@ -2904,14 +2912,14 @@ static int ib_uverbs_ex_create_wq(struct uverbs_attr_bundle *attrs)
return PTR_ERR(obj);
pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd.pd_handle, attrs);
if (!pd) {
err = -EINVAL;
if (IS_ERR(pd)) {
err = PTR_ERR(pd);
goto err_uobj;
}
cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd.cq_handle, attrs);
if (!cq) {
err = -EINVAL;
if (IS_ERR(cq)) {
err = PTR_ERR(cq);
goto err_put_pd;
}
@ -3012,8 +3020,8 @@ static int ib_uverbs_ex_modify_wq(struct uverbs_attr_bundle *attrs)
return -EINVAL;
wq = uobj_get_obj_read(wq, UVERBS_OBJECT_WQ, cmd.wq_handle, attrs);
if (!wq)
return -EINVAL;
if (IS_ERR(wq))
return PTR_ERR(wq);
if (cmd.attr_mask & IB_WQ_FLAGS) {
wq_attr.flags = cmd.flags;
@ -3096,8 +3104,8 @@ static int ib_uverbs_ex_create_rwq_ind_table(struct uverbs_attr_bundle *attrs)
num_read_wqs++) {
wq = uobj_get_obj_read(wq, UVERBS_OBJECT_WQ,
wqs_handles[num_read_wqs], attrs);
if (!wq) {
err = -EINVAL;
if (IS_ERR(wq)) {
err = PTR_ERR(wq);
goto put_wqs;
}
@ -3252,8 +3260,8 @@ static int ib_uverbs_ex_create_flow(struct uverbs_attr_bundle *attrs)
}
qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, attrs);
if (!qp) {
err = -EINVAL;
if (IS_ERR(qp)) {
err = PTR_ERR(qp);
goto err_uobj;
}
@ -3399,15 +3407,15 @@ static int __uverbs_create_xsrq(struct uverbs_attr_bundle *attrs,
if (ib_srq_has_cq(cmd->srq_type)) {
attr.ext.cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ,
cmd->cq_handle, attrs);
if (!attr.ext.cq) {
ret = -EINVAL;
if (IS_ERR(attr.ext.cq)) {
ret = PTR_ERR(attr.ext.cq);
goto err_put_xrcd;
}
}
pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd->pd_handle, attrs);
if (!pd) {
ret = -EINVAL;
if (IS_ERR(pd)) {
ret = PTR_ERR(pd);
goto err_put_cq;
}
@ -3514,8 +3522,8 @@ static int ib_uverbs_modify_srq(struct uverbs_attr_bundle *attrs)
return ret;
srq = uobj_get_obj_read(srq, UVERBS_OBJECT_SRQ, cmd.srq_handle, attrs);
if (!srq)
return -EINVAL;
if (IS_ERR(srq))
return PTR_ERR(srq);
attr.max_wr = cmd.max_wr;
attr.srq_limit = cmd.srq_limit;
@ -3542,8 +3550,8 @@ static int ib_uverbs_query_srq(struct uverbs_attr_bundle *attrs)
return ret;
srq = uobj_get_obj_read(srq, UVERBS_OBJECT_SRQ, cmd.srq_handle, attrs);
if (!srq)
return -EINVAL;
if (IS_ERR(srq))
return PTR_ERR(srq);
ret = ib_query_srq(srq, &attr);
@ -3668,8 +3676,8 @@ static int ib_uverbs_ex_modify_cq(struct uverbs_attr_bundle *attrs)
return -EOPNOTSUPP;
cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd.cq_handle, attrs);
if (!cq)
return -EINVAL;
if (IS_ERR(cq))
return PTR_ERR(cq);
ret = rdma_set_cq_moderation(cq, cmd.attr.cq_count, cmd.attr.cq_period);

View File

@ -2959,22 +2959,23 @@ EXPORT_SYMBOL(__rdma_block_iter_start);
bool __rdma_block_iter_next(struct ib_block_iter *biter)
{
unsigned int block_offset;
unsigned int sg_delta;
unsigned int delta;
if (!biter->__sg_nents || !biter->__sg)
return false;
biter->__dma_addr = sg_dma_address(biter->__sg) + biter->__sg_advance;
block_offset = biter->__dma_addr & (BIT_ULL(biter->__pg_bit) - 1);
sg_delta = BIT_ULL(biter->__pg_bit) - block_offset;
delta = BIT_ULL(biter->__pg_bit) - block_offset;
if (sg_dma_len(biter->__sg) - biter->__sg_advance > sg_delta) {
biter->__sg_advance += sg_delta;
} else {
while (biter->__sg_nents && biter->__sg &&
sg_dma_len(biter->__sg) - biter->__sg_advance <= delta) {
delta -= sg_dma_len(biter->__sg) - biter->__sg_advance;
biter->__sg_advance = 0;
biter->__sg = sg_next(biter->__sg);
biter->__sg_nents--;
}
biter->__sg_advance += delta;
return true;
}
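
The rewritten iterator lets a single block span several scatterlist entries: instead of assuming the current entry covers the whole delta, it now consumes entries until the remaining delta fits. A compact sketch of that walk over a plain array of segments (addresses and sizes are made up):

#include <stdint.h>
#include <stdio.h>

struct seg { uint64_t dma; uint64_t len; };

int main(void)
{
    /* three contiguous segments, each smaller than the block size */
    struct seg sg[] = {
        { 0x1000, 0x800 }, { 0x1800, 0x800 }, { 0x2000, 0x1000 },
    };
    const uint64_t blk = 0x1000;   /* emit one address per 4 KiB block */
    uint64_t advance = 0;
    int i = 0;

    while (i < 3) {
        uint64_t addr = sg[i].dma + advance;
        uint64_t delta = blk - (addr & (blk - 1));

        printf("block at 0x%llx\n", (unsigned long long)addr);
        /* consume whole segments while the block still overruns them */
        while (i < 3 && sg[i].len - advance <= delta) {
            delta -= sg[i].len - advance;
            advance = 0;
            i++;
        }
        advance += delta;
    }
    return 0;
}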

View File

@ -264,7 +264,7 @@ static int iommu_v2_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
map_size = get_alloc_page_size(pgsize);
pte = v2_alloc_pte(pdom->iop.pgd, iova, map_size, &updated);
if (!pte) {
ret = -EINVAL;
ret = -ENOMEM;
goto out;
}

View File

@ -1661,7 +1661,7 @@ int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr)
static DEFINE_MUTEX(msi_prepare_lock); /* see below */
if (!domain || !domain->iova_cookie) {
desc->iommu_cookie = NULL;
msi_desc_set_iommu_msi_iova(desc, 0, 0);
return 0;
}
@ -1673,11 +1673,12 @@ int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr)
mutex_lock(&msi_prepare_lock);
msi_page = iommu_dma_get_msi_page(dev, msi_addr, domain);
mutex_unlock(&msi_prepare_lock);
msi_desc_set_iommu_cookie(desc, msi_page);
if (!msi_page)
return -ENOMEM;
msi_desc_set_iommu_msi_iova(
desc, msi_page->iova,
ilog2(cookie_msi_granule(domain->iova_cookie)));
return 0;
}
@ -1688,18 +1689,15 @@ int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr)
*/
void iommu_dma_compose_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
{
struct device *dev = msi_desc_to_dev(desc);
const struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
const struct iommu_dma_msi_page *msi_page;
#ifdef CONFIG_IRQ_MSI_IOMMU
if (desc->iommu_msi_shift) {
u64 msi_iova = desc->iommu_msi_iova << desc->iommu_msi_shift;
msi_page = msi_desc_get_iommu_cookie(desc);
if (!domain || !domain->iova_cookie || WARN_ON(!msi_page))
return;
msg->address_hi = upper_32_bits(msi_page->iova);
msg->address_lo &= cookie_msi_granule(domain->iova_cookie) - 1;
msg->address_lo += lower_32_bits(msi_page->iova);
msg->address_hi = upper_32_bits(msi_iova);
msg->address_lo = lower_32_bits(msi_iova) |
(msg->address_lo & ((1 << desc->iommu_msi_shift) - 1));
}
#endif
}
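
With the page-granule iova and its shift stored in the MSI descriptor, composing the message is pure bit surgery: everything above the granule comes from the iova, while the offset inside the granule is preserved from the device's original address. A small sketch of the composition with invented values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t msi_iova   = 0x8000;      /* page number of remapped doorbell */
    unsigned int shift  = 12;          /* iova granule: 4 KiB */
    uint32_t address_lo = 0xfee00040;  /* original low word from the device */
    uint32_t address_hi;

    uint64_t iova = msi_iova << shift;

    address_hi = (uint32_t)(iova >> 32);
    /* keep the offset within the granule, replace everything above it */
    address_lo = (uint32_t)iova | (address_lo & ((1u << shift) - 1));

    printf("hi=0x%08x lo=0x%08x\n", address_hi, address_lo);
    return 0;
}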
static int iommu_dma_init(void)

View File

@ -135,8 +135,11 @@ static int led_pwm_mc_probe(struct platform_device *pdev)
/* init the multicolor's LED class device */
cdev = &priv->mc_cdev.led_cdev;
fwnode_property_read_u32(mcnode, "max-brightness",
ret = fwnode_property_read_u32(mcnode, "max-brightness",
&cdev->max_brightness);
if (ret)
goto release_mcnode;
cdev->flags = LED_CORE_SUSPENDRESUME;
cdev->brightness_set_blocking = led_pwm_mc_set;

View File

@ -350,11 +350,12 @@ struct mbox_chan *mbox_request_channel(struct mbox_client *cl, int index)
mutex_lock(&con_mutex);
if (of_parse_phandle_with_args(dev->of_node, "mboxes",
"#mbox-cells", index, &spec)) {
ret = of_parse_phandle_with_args(dev->of_node, "mboxes", "#mbox-cells",
index, &spec);
if (ret) {
dev_dbg(dev, "%s: can't parse \"mboxes\" property\n", __func__);
mutex_unlock(&con_mutex);
return ERR_PTR(-ENODEV);
return ERR_PTR(ret);
}
chan = ERR_PTR(-EPROBE_DEFER);

View File

@ -2875,6 +2875,27 @@ static dm_cblock_t get_cache_dev_size(struct cache *cache)
return to_cblock(size);
}
static bool can_resume(struct cache *cache)
{
/*
* Disallow retrying the resume operation for devices that failed the
* first resume attempt, as the failure leaves the policy object partially
* initialized. Retrying could trigger BUG_ON when loading cache mappings
* into the incomplete policy object.
*/
if (cache->sized && !cache->loaded_mappings) {
if (get_cache_mode(cache) != CM_WRITE)
DMERR("%s: unable to resume a failed-loaded cache, please check metadata.",
cache_device_name(cache));
else
DMERR("%s: unable to resume cache due to missing proper cache table reload",
cache_device_name(cache));
return false;
}
return true;
}
static bool can_resize(struct cache *cache, dm_cblock_t new_size)
{
if (from_cblock(new_size) > from_cblock(cache->cache_size)) {
@ -2923,6 +2944,9 @@ static int cache_preresume(struct dm_target *ti)
struct cache *cache = ti->private;
dm_cblock_t csize = get_cache_dev_size(cache);
if (!can_resume(cache))
return -EINVAL;
/*
* Check to see if the cache has resized.
*/

View File

@ -671,6 +671,10 @@ int dm_table_add_target(struct dm_table *t, const char *type,
DMERR("%s: zero-length target", dm_device_name(t->md));
return -EINVAL;
}
if (start + len < start || start + len > LLONG_MAX >> SECTOR_SHIFT) {
DMERR("%s: too large device", dm_device_name(t->md));
return -EINVAL;
}
ti->type = dm_get_target_type(type);
if (!ti->type) {
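
The guard added above is the standard overflow-safe range check for sector counts: reject wrap-around on start + len, and reject byte offsets that no longer fit in a signed 64-bit loff_t once sectors are shifted to bytes (hence LLONG_MAX >> SECTOR_SHIFT). A stand-alone sketch with illustrative values:

#include <limits.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SECTOR_SHIFT 9

static bool range_ok(uint64_t start, uint64_t len)
{
    uint64_t end = start + len;

    if (end < start)                          /* wrapped around */
        return false;
    if (end > (uint64_t)(LLONG_MAX >> SECTOR_SHIFT))
        return false;                         /* bytes overflow loff_t */
    return true;
}

int main(void)
{
    printf("%d\n", range_ok(0, 1u << 21));          /* 1: fine */
    printf("%d\n", range_ok(UINT64_MAX - 8, 16));   /* 0: wraps */
    printf("%d\n", range_ok(0, UINT64_MAX >> 1));   /* 0: too large */
    return 0;
}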

View File

@ -1523,14 +1523,18 @@ static void __send_empty_flush(struct clone_info *ci)
{
struct dm_table *t = ci->map;
struct bio flush_bio;
blk_opf_t opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC;
if ((ci->io->orig_bio->bi_opf & (REQ_IDLE | REQ_SYNC)) ==
(REQ_IDLE | REQ_SYNC))
opf |= REQ_IDLE;
/*
* Use an on-stack bio for this, it's safe since we don't
* need to reference it after submit. It's just used as
* the basis for the clone(s).
*/
bio_init(&flush_bio, ci->io->md->disk->part0, NULL, 0,
REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC);
bio_init(&flush_bio, ci->io->md->disk->part0, NULL, 0, opf);
ci->bio = &flush_bio;
ci->sector_count = 0;

View File

@ -194,6 +194,7 @@ struct adv7180_state;
#define ADV7180_FLAG_V2 BIT(1)
#define ADV7180_FLAG_MIPI_CSI2 BIT(2)
#define ADV7180_FLAG_I2P BIT(3)
#define ADV7180_FLAG_TEST_PATTERN BIT(4)
struct adv7180_chip_info {
unsigned int flags;
@ -673,11 +674,15 @@ static int adv7180_init_controls(struct adv7180_state *state)
ADV7180_HUE_MAX, 1, ADV7180_HUE_DEF);
v4l2_ctrl_new_custom(&state->ctrl_hdl, &adv7180_ctrl_fast_switch, NULL);
v4l2_ctrl_new_std_menu_items(&state->ctrl_hdl, &adv7180_ctrl_ops,
V4L2_CID_TEST_PATTERN,
ARRAY_SIZE(test_pattern_menu) - 1,
0, ARRAY_SIZE(test_pattern_menu) - 1,
test_pattern_menu);
if (state->chip_info->flags & ADV7180_FLAG_TEST_PATTERN) {
v4l2_ctrl_new_std_menu_items(&state->ctrl_hdl,
&adv7180_ctrl_ops,
V4L2_CID_TEST_PATTERN,
ARRAY_SIZE(test_pattern_menu) - 1,
0,
ARRAY_SIZE(test_pattern_menu) - 1,
test_pattern_menu);
}
state->sd.ctrl_handler = &state->ctrl_hdl;
if (state->ctrl_hdl.error) {
@ -1209,7 +1214,7 @@ static const struct adv7180_chip_info adv7182_info = {
};
static const struct adv7180_chip_info adv7280_info = {
.flags = ADV7180_FLAG_V2 | ADV7180_FLAG_I2P,
.flags = ADV7180_FLAG_V2 | ADV7180_FLAG_I2P | ADV7180_FLAG_TEST_PATTERN,
.valid_input_mask = BIT(ADV7182_INPUT_CVBS_AIN1) |
BIT(ADV7182_INPUT_CVBS_AIN2) |
BIT(ADV7182_INPUT_CVBS_AIN3) |
@ -1223,7 +1228,8 @@ static const struct adv7180_chip_info adv7280_info = {
};
static const struct adv7180_chip_info adv7280_m_info = {
.flags = ADV7180_FLAG_V2 | ADV7180_FLAG_MIPI_CSI2 | ADV7180_FLAG_I2P,
.flags = ADV7180_FLAG_V2 | ADV7180_FLAG_MIPI_CSI2 | ADV7180_FLAG_I2P |
ADV7180_FLAG_TEST_PATTERN,
.valid_input_mask = BIT(ADV7182_INPUT_CVBS_AIN1) |
BIT(ADV7182_INPUT_CVBS_AIN2) |
BIT(ADV7182_INPUT_CVBS_AIN3) |
@ -1244,7 +1250,8 @@ static const struct adv7180_chip_info adv7280_m_info = {
};
static const struct adv7180_chip_info adv7281_info = {
.flags = ADV7180_FLAG_V2 | ADV7180_FLAG_MIPI_CSI2,
.flags = ADV7180_FLAG_V2 | ADV7180_FLAG_MIPI_CSI2 |
ADV7180_FLAG_TEST_PATTERN,
.valid_input_mask = BIT(ADV7182_INPUT_CVBS_AIN1) |
BIT(ADV7182_INPUT_CVBS_AIN2) |
BIT(ADV7182_INPUT_CVBS_AIN7) |
@ -1259,7 +1266,8 @@ static const struct adv7180_chip_info adv7281_info = {
};
static const struct adv7180_chip_info adv7281_m_info = {
.flags = ADV7180_FLAG_V2 | ADV7180_FLAG_MIPI_CSI2,
.flags = ADV7180_FLAG_V2 | ADV7180_FLAG_MIPI_CSI2 |
ADV7180_FLAG_TEST_PATTERN,
.valid_input_mask = BIT(ADV7182_INPUT_CVBS_AIN1) |
BIT(ADV7182_INPUT_CVBS_AIN2) |
BIT(ADV7182_INPUT_CVBS_AIN3) |
@ -1279,7 +1287,8 @@ static const struct adv7180_chip_info adv7281_m_info = {
};
static const struct adv7180_chip_info adv7281_ma_info = {
.flags = ADV7180_FLAG_V2 | ADV7180_FLAG_MIPI_CSI2,
.flags = ADV7180_FLAG_V2 | ADV7180_FLAG_MIPI_CSI2 |
ADV7180_FLAG_TEST_PATTERN,
.valid_input_mask = BIT(ADV7182_INPUT_CVBS_AIN1) |
BIT(ADV7182_INPUT_CVBS_AIN2) |
BIT(ADV7182_INPUT_CVBS_AIN3) |
@ -1304,7 +1313,7 @@ static const struct adv7180_chip_info adv7281_ma_info = {
};
static const struct adv7180_chip_info adv7282_info = {
.flags = ADV7180_FLAG_V2 | ADV7180_FLAG_I2P,
.flags = ADV7180_FLAG_V2 | ADV7180_FLAG_I2P | ADV7180_FLAG_TEST_PATTERN,
.valid_input_mask = BIT(ADV7182_INPUT_CVBS_AIN1) |
BIT(ADV7182_INPUT_CVBS_AIN2) |
BIT(ADV7182_INPUT_CVBS_AIN7) |
@ -1319,7 +1328,8 @@ static const struct adv7180_chip_info adv7282_info = {
};
static const struct adv7180_chip_info adv7282_m_info = {
.flags = ADV7180_FLAG_V2 | ADV7180_FLAG_MIPI_CSI2 | ADV7180_FLAG_I2P,
.flags = ADV7180_FLAG_V2 | ADV7180_FLAG_MIPI_CSI2 | ADV7180_FLAG_I2P |
ADV7180_FLAG_TEST_PATTERN,
.valid_input_mask = BIT(ADV7182_INPUT_CVBS_AIN1) |
BIT(ADV7182_INPUT_CVBS_AIN2) |
BIT(ADV7182_INPUT_CVBS_AIN3) |

View File

@ -239,11 +239,13 @@ static int csid_set_stream(struct v4l2_subdev *sd, int enable)
int ret;
if (enable) {
ret = v4l2_ctrl_handler_setup(&csid->ctrls);
if (ret < 0) {
dev_err(csid->camss->dev,
"could not sync v4l2 controls: %d\n", ret);
return ret;
if (csid->testgen.nmodes != CSID_PAYLOAD_MODE_DISABLED) {
ret = v4l2_ctrl_handler_setup(&csid->ctrls);
if (ret < 0) {
dev_err(csid->camss->dev,
"could not sync v4l2 controls: %d\n", ret);
return ret;
}
}
if (!csid->testgen.enabled &&
@ -318,7 +320,8 @@ static void csid_try_format(struct csid_device *csid,
break;
case MSM_CSID_PAD_SRC:
if (csid->testgen_mode->cur.val == 0) {
if (csid->testgen.nmodes == CSID_PAYLOAD_MODE_DISABLED ||
csid->testgen_mode->cur.val == 0) {
/* Test generator is disabled, */
/* keep pad formats in sync */
u32 code = fmt->code;
@ -368,7 +371,8 @@ static int csid_enum_mbus_code(struct v4l2_subdev *sd,
code->code = csid->formats[code->index].code;
} else {
if (csid->testgen_mode->cur.val == 0) {
if (csid->testgen.nmodes == CSID_PAYLOAD_MODE_DISABLED ||
csid->testgen_mode->cur.val == 0) {
struct v4l2_mbus_framefmt *sink_fmt;
sink_fmt = __csid_get_format(csid, sd_state,
@ -750,7 +754,8 @@ static int csid_link_setup(struct media_entity *entity,
/* If test generator is enabled */
/* do not allow a link from CSIPHY to CSID */
if (csid->testgen_mode->cur.val != 0)
if (csid->testgen.nmodes != CSID_PAYLOAD_MODE_DISABLED &&
csid->testgen_mode->cur.val != 0)
return -EBUSY;
sd = media_entity_to_v4l2_subdev(remote->entity);
@ -843,25 +848,28 @@ int msm_csid_register_entity(struct csid_device *csid,
MSM_CSID_NAME, csid->id);
v4l2_set_subdevdata(sd, csid);
ret = v4l2_ctrl_handler_init(&csid->ctrls, 1);
if (ret < 0) {
dev_err(dev, "Failed to init ctrl handler: %d\n", ret);
return ret;
if (csid->testgen.nmodes != CSID_PAYLOAD_MODE_DISABLED) {
ret = v4l2_ctrl_handler_init(&csid->ctrls, 1);
if (ret < 0) {
dev_err(dev, "Failed to init ctrl handler: %d\n", ret);
return ret;
}
csid->testgen_mode =
v4l2_ctrl_new_std_menu_items(&csid->ctrls,
&csid_ctrl_ops, V4L2_CID_TEST_PATTERN,
csid->testgen.nmodes, 0, 0,
csid->testgen.modes);
if (csid->ctrls.error) {
dev_err(dev, "Failed to init ctrl: %d\n", csid->ctrls.error);
ret = csid->ctrls.error;
goto free_ctrl;
}
csid->subdev.ctrl_handler = &csid->ctrls;
}
csid->testgen_mode = v4l2_ctrl_new_std_menu_items(&csid->ctrls,
&csid_ctrl_ops, V4L2_CID_TEST_PATTERN,
csid->testgen.nmodes, 0, 0,
csid->testgen.modes);
if (csid->ctrls.error) {
dev_err(dev, "Failed to init ctrl: %d\n", csid->ctrls.error);
ret = csid->ctrls.error;
goto free_ctrl;
}
csid->subdev.ctrl_handler = &csid->ctrls;
ret = csid_init_formats(sd, NULL);
if (ret < 0) {
dev_err(dev, "Failed to init format: %d\n", ret);
@ -891,7 +899,8 @@ int msm_csid_register_entity(struct csid_device *csid,
media_cleanup:
media_entity_cleanup(&sd->entity);
free_ctrl:
v4l2_ctrl_handler_free(&csid->ctrls);
if (csid->testgen.nmodes != CSID_PAYLOAD_MODE_DISABLED)
v4l2_ctrl_handler_free(&csid->ctrls);
return ret;
}
@ -904,5 +913,6 @@ void msm_csid_unregister_entity(struct csid_device *csid)
{
v4l2_device_unregister_subdev(&csid->subdev);
media_entity_cleanup(&csid->subdev.entity);
v4l2_ctrl_handler_free(&csid->ctrls);
if (csid->testgen.nmodes != CSID_PAYLOAD_MODE_DISABLED)
v4l2_ctrl_handler_free(&csid->ctrls);
}

View File

@ -802,13 +802,12 @@ static int c8sectpfe_probe(struct platform_device *pdev)
}
tsin->i2c_adapter =
of_find_i2c_adapter_by_node(i2c_bus);
of_node_put(i2c_bus);
if (!tsin->i2c_adapter) {
dev_err(&pdev->dev, "No i2c adapter found\n");
of_node_put(i2c_bus);
ret = -ENODEV;
goto err_node_put;
}
of_node_put(i2c_bus);
tsin->rst_gpio = of_get_named_gpio(child, "reset-gpios", 0);

View File

@ -894,9 +894,14 @@ static int vivid_thread_vid_cap(void *data)
next_jiffies_since_start = jiffies_since_start;
wait_jiffies = next_jiffies_since_start - jiffies_since_start;
while (time_is_after_jiffies(cur_jiffies + wait_jiffies) &&
!kthread_should_stop())
schedule();
if (!time_is_after_jiffies(cur_jiffies + wait_jiffies))
continue;
wait_queue_head_t wait;
init_waitqueue_head(&wait);
wait_event_interruptible_timeout(wait, kthread_should_stop(),
cur_jiffies + wait_jiffies - jiffies);
}
dprintk(dev, 1, "Video Capture Thread End\n");
return 0;
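
Rather than spinning in schedule() until the deadline, the thread now sleeps on a dummy waitqueue that nothing signals, so wait_event_interruptible_timeout() blocks for the remaining jiffies yet still re-checks kthread_should_stop() on wakeup. A user-space analogue of the same sleep-until-deadline-but-stay-cancellable pattern, using a condition variable (all names here are illustrative):

#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  cond = PTHREAD_COND_INITIALIZER;
static bool should_stop;

/* sleep until 'deadline' unless asked to stop earlier */
static void wait_deadline(const struct timespec *deadline)
{
    pthread_mutex_lock(&lock);
    while (!should_stop &&
           pthread_cond_timedwait(&cond, &lock, deadline) != ETIMEDOUT)
        ;
    pthread_mutex_unlock(&lock);
}

int main(void)
{
    struct timespec deadline;

    clock_gettime(CLOCK_REALTIME, &deadline);
    deadline.tv_sec += 1;        /* stand-in for the next frame time */
    wait_deadline(&deadline);    /* sleeps without burning CPU */
    puts("deadline reached");
    return 0;
}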

View File

@ -235,9 +235,14 @@ static int vivid_thread_vid_out(void *data)
next_jiffies_since_start = jiffies_since_start;
wait_jiffies = next_jiffies_since_start - jiffies_since_start;
while (time_is_after_jiffies(cur_jiffies + wait_jiffies) &&
!kthread_should_stop())
schedule();
if (!time_is_after_jiffies(cur_jiffies + wait_jiffies))
continue;
wait_queue_head_t wait;
init_waitqueue_head(&wait);
wait_event_interruptible_timeout(wait, kthread_should_stop(),
cur_jiffies + wait_jiffies - jiffies);
}
dprintk(dev, 1, "Video Output Thread End\n");
return 0;

View File

@ -135,9 +135,14 @@ static int vivid_thread_touch_cap(void *data)
next_jiffies_since_start = jiffies_since_start;
wait_jiffies = next_jiffies_since_start - jiffies_since_start;
while (time_is_after_jiffies(cur_jiffies + wait_jiffies) &&
!kthread_should_stop())
schedule();
if (!time_is_after_jiffies(cur_jiffies + wait_jiffies))
continue;
wait_queue_head_t wait;
init_waitqueue_head(&wait);
wait_event_interruptible_timeout(wait, kthread_should_stop(),
cur_jiffies + wait_jiffies - jiffies);
}
dprintk(dev, 1, "Touch Capture Thread End\n");
return 0;

Some files were not shown because too many files have changed in this diff.