This is the 5.15.180 stable release

Merge tag 'v5.15.180' into v5.15/standard/base

This is the 5.15.180 stable release

# -----BEGIN PGP SIGNATURE-----
#
# iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAmf3ukwACgkQONu9yGCS
# aT6JUA//QGjcJymP9sGPhvtCKY807MNa5G7zFB4WZ6cRRr5JHYKg63hTvwTM1MoW
# f82V98NfVH0MuUXRayD2ubpHM21jx1bb8EpdMC4n5o0VZNrs2EvlTtuOkGu+1IWn
# yMgq5qdQAjc8uWw9Rmai2REfS3HJszSk6+/CIcEBRxg6UWQ7B19GfVSsUgrnVZA8
# w0KIvA3nd8DLtMcrHS3NjxMV2HPQj0VhO3REkf0tq16qf8Gp6pyxKhVvsrpDTA53
# hoRSPDeYOYM5wj+k6iQcDP7wSCsQiFMGHaLnwXCyzPcepaSCXKgNvOQzMzJRX1qa
# wdR/fhvhr9SC5Ps4a+RD67ghaIqXdCb1YgRYId/BFytFUE+b/Fde4caZ3wBvkwHr
# BcgNI3Lwz08swZYiAuUcEWmR/2FM/j4OLmnD017ZXgJlaiLq9FMWnCbSfJouLbRE
# rmfkXxYM7It+eF3FWaWTZDPXEyuRF1kALdypvVbIE9RDYzSynGgFy7GcFEWnCv5s
# Fc4tdHhbBJc/k8zTURXbTn2kQblmwuMKfehn/E32Zg8PiNyT/lJ/l9GqWwqtaQhP
# RqXU+LzQ/c4alDS2WXb9ldzCseyvbhU56LJp5t8Nfvcmw9db2IfSBpgqmAiwxRpF
# OhyNQgiibkEIHQIRC+JAStdIahXc1aLL+a8WtYCe4GW3faMO9GQ=
# =Aa/c
# -----END PGP SIGNATURE-----
# gpg: Signature made Thu 10 Apr 2025 08:32:12 AM EDT
# gpg:                using RSA key 647F28654894E3BD457199BE38DBBDC86092693E
# gpg: Can't check signature: No public key
Bruce Ashfield 2025-04-13 22:55:23 -04:00
commit d3877a02f0
281 changed files with 2104 additions and 999 deletions

View File

@ -129,11 +129,8 @@ adaptive-tick CPUs: At least one non-adaptive-tick CPU must remain
online to handle timekeeping tasks in order to ensure that system
calls like gettimeofday() return accurate values on adaptive-tick CPUs.
(This is not an issue for CONFIG_NO_HZ_IDLE=y because there are no running
user processes to observe slight drifts in clock rate.) Therefore, the
boot CPU is prohibited from entering adaptive-ticks mode. Specifying a
"nohz_full=" mask that includes the boot CPU will result in a boot-time
error message, and the boot CPU will be removed from the mask. Note that
this means that your system must have at least two CPUs in order for
user processes to observe slight drifts in clock rate.) Note that this
means that your system must have at least two CPUs in order for
CONFIG_NO_HZ_FULL=y to do anything for you.
Finally, adaptive-ticks CPUs must have their RCU callbacks offloaded.
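
As a minimal illustration of the configuration described above (assuming an 8-CPU system where CPU 0 remains the timekeeping CPU):

    nohz_full=1-7

nohz_full= is a standard kernel boot parameter; whether the RCU-callback offload mentioned in the last line needs a separate rcu_nocbs= entry depends on the kernel version, so treat the exact spelling as an assumption.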

View File

@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 5
PATCHLEVEL = 15
SUBLEVEL = 179
SUBLEVEL = 180
EXTRAVERSION =
NAME = Trick or Treat

View File

@ -74,7 +74,7 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
/*
* This is used to ensure we don't load something for the wrong architecture.
*/
#define elf_check_arch(x) ((x)->e_machine == EM_ALPHA)
#define elf_check_arch(x) (((x)->e_machine == EM_ALPHA) && !((x)->e_flags & EF_ALPHA_32BIT))
/*
* These are used to set parameters in the core dumps.
@ -145,10 +145,6 @@ extern int dump_elf_task_fp(elf_fpreg_t *dest, struct task_struct *task);
: amask (AMASK_CIX) ? "ev6" : "ev67"); \
})
#define SET_PERSONALITY(EX) \
set_personality(((EX).e_flags & EF_ALPHA_32BIT) \
? PER_LINUX_32BIT : PER_LINUX)
extern int alpha_l1i_cacheshape;
extern int alpha_l1d_cacheshape;
extern int alpha_l2_cacheshape;
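
A minimal freestanding sketch of the check the new elf_check_arch() performs; the EM_ALPHA and EF_ALPHA_32BIT values below are illustrative fallbacks taken from common elf.h headers:

#include <elf.h>
#include <stdbool.h>

#ifndef EM_ALPHA
#define EM_ALPHA       0x9026  /* illustrative; real builds get it from headers */
#endif
#ifndef EF_ALPHA_32BIT
#define EF_ALPHA_32BIT 1
#endif

/* Mirrors the new macro: reject EF_ALPHA_32BIT binaries outright now
 * that the 32-bit personality support is removed. */
static bool alpha_elf_ok(const Elf64_Ehdr *eh)
{
        return eh->e_machine == EM_ALPHA && !(eh->e_flags & EF_ALPHA_32BIT);
}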

View File

@ -340,7 +340,7 @@ extern inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
extern void paging_init(void);
/* We have our own get_unmapped_area to cope with ADDR_LIMIT_32BIT. */
/* We have our own get_unmapped_area */
#define HAVE_ARCH_UNMAPPED_AREA
#endif /* _ALPHA_PGTABLE_H */

View File

@ -8,23 +8,19 @@
#ifndef __ASM_ALPHA_PROCESSOR_H
#define __ASM_ALPHA_PROCESSOR_H
#include <linux/personality.h> /* for ADDR_LIMIT_32BIT */
/*
* We have a 42-bit user address space: 4TB user VM...
*/
#define TASK_SIZE (0x40000000000UL)
#define STACK_TOP \
(current->personality & ADDR_LIMIT_32BIT ? 0x80000000 : 0x00120000000UL)
#define STACK_TOP (0x00120000000UL)
#define STACK_TOP_MAX 0x00120000000UL
/* This decides where the kernel will search for a free chunk of vm
* space during mmap's.
*/
#define TASK_UNMAPPED_BASE \
((current->personality & ADDR_LIMIT_32BIT) ? 0x40000000 : TASK_SIZE / 2)
#define TASK_UNMAPPED_BASE (TASK_SIZE / 2)
typedef struct {
unsigned long seg;

View File

@ -1212,8 +1212,7 @@ SYSCALL_DEFINE1(old_adjtimex, struct timex32 __user *, txc_p)
return ret;
}
/* Get an address range which is currently unmapped. Similar to the
generic version except that we know how to honor ADDR_LIMIT_32BIT. */
/* Get an address range which is currently unmapped. */
static unsigned long
arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
@ -1235,13 +1234,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
unsigned long len, unsigned long pgoff,
unsigned long flags)
{
unsigned long limit;
/* "32 bit" actually means 31 bit, since pointers sign extend. */
if (current->personality & ADDR_LIMIT_32BIT)
limit = 0x80000000;
else
limit = TASK_SIZE;
unsigned long limit = TASK_SIZE;
if (len > limit)
return -ENOMEM;

View File

@ -133,7 +133,7 @@
clocks = <&clocks BCM2835_CLOCK_UART>,
<&clocks BCM2835_CLOCK_VPU>;
clock-names = "uartclk", "apb_pclk";
arm,primecell-periphid = <0x00241011>;
arm,primecell-periphid = <0x00341011>;
status = "disabled";
};
@ -144,7 +144,7 @@
clocks = <&clocks BCM2835_CLOCK_UART>,
<&clocks BCM2835_CLOCK_VPU>;
clock-names = "uartclk", "apb_pclk";
arm,primecell-periphid = <0x00241011>;
arm,primecell-periphid = <0x00341011>;
status = "disabled";
};
@ -155,7 +155,7 @@
clocks = <&clocks BCM2835_CLOCK_UART>,
<&clocks BCM2835_CLOCK_VPU>;
clock-names = "uartclk", "apb_pclk";
arm,primecell-periphid = <0x00241011>;
arm,primecell-periphid = <0x00341011>;
status = "disabled";
};
@ -166,7 +166,7 @@
clocks = <&clocks BCM2835_CLOCK_UART>,
<&clocks BCM2835_CLOCK_VPU>;
clock-names = "uartclk", "apb_pclk";
arm,primecell-periphid = <0x00241011>;
arm,primecell-periphid = <0x00341011>;
status = "disabled";
};
@ -450,8 +450,6 @@
IRQ_TYPE_LEVEL_LOW)>,
<GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(4) |
IRQ_TYPE_LEVEL_LOW)>;
/* This only applies to the ARMv7 stub */
arm,cpu-registers-not-fw-configured;
};
cpus: cpus {
@ -1142,6 +1140,7 @@
};
&uart0 {
arm,primecell-periphid = <0x00341011>;
interrupts = <GIC_SPI 121 IRQ_TYPE_LEVEL_HIGH>;
};

View File

@ -136,6 +136,7 @@ ENDPROC(shmobile_smp_sleep)
.long shmobile_smp_arg - 1b
.bss
.align 2
.globl shmobile_smp_mpidr
shmobile_smp_mpidr:
.space NR_CPUS * 4

View File

@ -26,6 +26,13 @@
#ifdef CONFIG_MMU
bool copy_from_kernel_nofault_allowed(const void *unsafe_src, size_t size)
{
unsigned long addr = (unsigned long)unsafe_src;
return addr >= TASK_SIZE && ULONG_MAX - addr >= size;
}
/*
* This is useful to dump out the page tables associated with
* 'addr' in mm 'mm'.
@ -561,6 +568,7 @@ do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs)
if (!inf->fn(addr, ifsr | FSR_LNX_PF, regs))
return;
pr_alert("8<--- cut here ---\n");
pr_alert("Unhandled prefetch abort: %s (0x%03x) at 0x%08lx\n",
inf->name, ifsr, addr);
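
The new copy_from_kernel_nofault_allowed() guards the end-of-range computation against wraparound: checking ULONG_MAX - addr >= size avoids the overflow a naive addr + size comparison could hit. A standalone sketch of the same logic (task_size stands in for the kernel's TASK_SIZE):

#include <limits.h>
#include <stdbool.h>

static bool range_is_kernel(unsigned long addr, unsigned long size,
                            unsigned long task_size)
{
        /* addr + size would wrap exactly when size > ULONG_MAX - addr */
        return addr >= task_size && ULONG_MAX - addr >= size;
}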

View File

@ -117,7 +117,7 @@
};
&u2phy1_host {
status = "disabled";
phy-supply = <&vdd_5v>;
};
&uart0 {

View File

@ -190,8 +190,10 @@ static int spufs_fill_dir(struct dentry *dir,
return -ENOMEM;
ret = spufs_new_file(dir->d_sb, dentry, files->ops,
files->mode & mode, files->size, ctx);
if (ret)
if (ret) {
dput(dentry);
return ret;
}
files++;
}
return 0;
@ -434,8 +436,11 @@ spufs_create_context(struct inode *inode, struct dentry *dentry,
}
ret = spufs_mkdir(inode, dentry, flags, mode & 0777);
if (ret)
if (ret) {
if (neighbor)
put_spu_context(neighbor);
goto out_aff_unlock;
}
if (affinity) {
spufs_set_affinity(flags, SPUFS_I(d_inode(dentry))->i_ctx,

View File

@ -77,7 +77,7 @@ struct dyn_arch_ftrace {
#define make_call_t0(caller, callee, call) \
do { \
unsigned int offset = \
(unsigned long) callee - (unsigned long) caller; \
(unsigned long) (callee) - (unsigned long) (caller); \
call[0] = to_auipc_t0(offset); \
call[1] = to_jalr_t0(offset); \
} while (0)
@ -93,7 +93,7 @@ do { \
#define make_call_ra(caller, callee, call) \
do { \
unsigned int offset = \
(unsigned long) callee - (unsigned long) caller; \
(unsigned long) (callee) - (unsigned long) (caller); \
call[0] = to_auipc_ra(offset); \
call[1] = to_jalr_ra(offset); \
} while (0)
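
The added parentheses matter whenever the macro arguments are expressions rather than plain identifiers; a classic sketch of the failure mode in C:

#define BAD_DIFF(a, b)  ((unsigned long) b - (unsigned long) a)
#define GOOD_DIFF(a, b) ((unsigned long) (b) - (unsigned long) (a))

/* With a == base + 8, BAD_DIFF(base + 8, callee) expands to
 *      ((unsigned long) callee - (unsigned long) base + 8)
 * so the cast binds only to "base" and the "+ 8" is added instead of
 * subtracted; GOOD_DIFF keeps the whole expression inside the cast. */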

View File

@ -194,7 +194,7 @@ config X86
select HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
select HAVE_EBPF_JIT
select HAVE_EFFICIENT_UNALIGNED_ACCESS
select HAVE_EISA
select HAVE_EISA if X86_32
select HAVE_EXIT_THREAD
select HAVE_FAST_GUP
select HAVE_FENTRY if X86_64 || DYNAMIC_FTRACE

View File

@ -70,6 +70,8 @@ For 32-bit we have the following conventions - kernel is built with
pushq %rsi /* pt_regs->si */
movq 8(%rsp), %rsi /* temporarily store the return address in %rsi */
movq %rdi, 8(%rsp) /* pt_regs->di (overwriting original return address) */
/* We just clobbered the return address - use the IRET frame for unwinding: */
UNWIND_HINT_IRET_REGS offset=3*8
.else
pushq %rdi /* pt_regs->di */
pushq %rsi /* pt_regs->si */

View File

@ -227,7 +227,7 @@ void flush_tlb_multi(const struct cpumask *cpumask,
flush_tlb_mm_range((vma)->vm_mm, start, end, \
((vma)->vm_flags & VM_HUGETLB) \
? huge_page_shift(hstate_vma(vma)) \
: PAGE_SHIFT, false)
: PAGE_SHIFT, true)
extern void flush_tlb_all(void);
extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,

View File

@ -861,7 +861,7 @@ static enum ucode_state load_microcode_amd(u8 family, const u8 *data, size_t siz
return ret;
}
for_each_node(nid) {
for_each_node_with_cpus(nid) {
cpu = cpumask_first(cpumask_of_node(nid));
c = &cpu_data(cpu);

View File

@ -16,7 +16,6 @@
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kexec.h>
#include <linux/i8253.h>
#include <linux/random.h>
#include <asm/processor.h>
#include <asm/hypervisor.h>
@ -445,16 +444,6 @@ static void __init ms_hyperv_init_platform(void)
if (efi_enabled(EFI_BOOT))
x86_platform.get_nmi_reason = hv_get_nmi_reason;
/*
* Hyper-V VMs have a PIT emulation quirk such that zeroing the
* counter register during PIT shutdown restarts the PIT. So it
* continues to interrupt @18.2 HZ. Setting i8253_clear_counter
* to false tells pit_shutdown() not to zero the counter so that
* the PIT really is shutdown. Generation 2 VMs don't have a PIT,
* and setting this value has no effect.
*/
i8253_clear_counter_on_shutdown = false;
#if IS_ENABLED(CONFIG_HYPERV)
/*
* Setup the hook to get control post apic initialization.

View File

@ -150,13 +150,15 @@ int __init sgx_drv_init(void)
u64 xfrm_mask;
int ret;
if (!cpu_feature_enabled(X86_FEATURE_SGX_LC))
if (!cpu_feature_enabled(X86_FEATURE_SGX_LC)) {
pr_info("SGX disabled: SGX launch control CPU feature is not available, /dev/sgx_enclave disabled.\n");
return -ENODEV;
}
cpuid_count(SGX_CPUID, 0, &eax, &ebx, &ecx, &edx);
if (!(eax & 1)) {
pr_err("SGX disabled: SGX1 instruction support not available.\n");
pr_info("SGX disabled: SGX1 instruction support not available, /dev/sgx_enclave disabled.\n");
return -ENODEV;
}
@ -173,8 +175,10 @@ int __init sgx_drv_init(void)
}
ret = misc_register(&sgx_dev_enclave);
if (ret)
if (ret) {
pr_info("SGX disabled: Unable to register the /dev/sgx_enclave driver (%d).\n", ret);
return ret;
}
return 0;
}

View File

@ -195,6 +195,7 @@ static void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
printk("%sCall Trace:\n", log_lvl);
unwind_start(&state, task, regs, stack);
stack = stack ?: get_stack_pointer(task, regs);
regs = unwind_get_entry_regs(&state, &partial);
/*
@ -213,9 +214,7 @@ static void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
* - hardirq stack
* - entry stack
*/
for (stack = stack ?: get_stack_pointer(task, regs);
stack;
stack = stack_info.next_sp) {
for (; stack; stack = stack_info.next_sp) {
const char *stack_name;
stack = PTR_ALIGN(stack, sizeof(long));

View File

@ -23,8 +23,10 @@
#include <asm/traps.h>
#include <asm/thermal.h>
#if defined(CONFIG_X86_LOCAL_APIC) || defined(CONFIG_X86_THERMAL_VECTOR)
#define CREATE_TRACE_POINTS
#include <asm/trace/irq_vectors.h>
#endif
DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
EXPORT_PER_CPU_SYMBOL(irq_stat);

View File

@ -83,7 +83,12 @@ EXPORT_PER_CPU_SYMBOL_GPL(__tss_limit_invalid);
*/
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
memcpy(dst, src, arch_task_struct_size);
/* init_task is not dynamically sized (incomplete FPU state) */
if (unlikely(src == &init_task))
memcpy_and_pad(dst, arch_task_struct_size, src, sizeof(init_task), 0);
else
memcpy(dst, src, arch_task_struct_size);
#ifdef CONFIG_VM86
dst->thread.vm86 = NULL;
#endif
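
memcpy_and_pad() comes from <linux/string.h>: it copies the source and fills the remaining destination bytes with the pad value. A simplified model of the call above, for the case dest_len >= src_len (the real helper also handles truncation):

#include <string.h>

static void memcpy_and_pad_model(void *dest, size_t dest_len,
                                 const void *src, size_t src_len, int pad)
{
        /* Copy what init_task provides, zero-fill the tail it never
         * allocated (the dynamically sized FPU state). */
        memcpy(dest, src, src_len);
        memset((char *)dest + src_len, pad, dest_len - src_len);
}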

View File

@ -920,7 +920,7 @@ static unsigned long long cyc2ns_suspend;
void tsc_save_sched_clock_state(void)
{
if (!sched_clock_stable())
if (!static_branch_likely(&__use_tsc) && !sched_clock_stable())
return;
cyc2ns_suspend = sched_clock();
@ -940,7 +940,7 @@ void tsc_restore_sched_clock_state(void)
unsigned long flags;
int cpu;
if (!sched_clock_stable())
if (!static_branch_likely(&__use_tsc) && !sched_clock_stable())
return;
local_irq_save(flags);

View File

@ -183,7 +183,7 @@ static int pageattr_test(void)
break;
case 1:
err = change_page_attr_set(addrs, len[1], PAGE_CPA_TEST, 1);
err = change_page_attr_set(addrs, len[i], PAGE_CPA_TEST, 1);
break;
case 2:

View File

@ -73,7 +73,7 @@ struct bio_slab {
struct kmem_cache *slab;
unsigned int slab_ref;
unsigned int slab_size;
char name[8];
char name[12];
};
static DEFINE_MUTEX(bio_slab_lock);
static DEFINE_XARRAY(bio_slabs);
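
The slab name is built with snprintf() from a "bio-" prefix plus a decimal size (treat the exact format string as an assumption here); with name[8] only three digits fit after the prefix, so larger sizes were silently truncated and distinct slabs could collide on one name. A sketch of the sizing arithmetic:

#include <stdio.h>

/* Worst case: "bio-" (4) + 10 digits of a 32-bit value + NUL = 15 bytes.
 * snprintf() returns the untruncated length, so truncation is detectable. */
static int format_slab_name(char *name, size_t len, unsigned int size)
{
        int n = snprintf(name, len, "bio-%u", size);
        return (n >= 0 && (size_t)n < len) ? 0 : -1;  /* -1: truncated */
}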

View File

@ -485,7 +485,7 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
cmd_mask = nd_desc->cmd_mask;
if (cmd == ND_CMD_CALL && call_pkg->nd_family) {
family = call_pkg->nd_family;
if (family > NVDIMM_BUS_FAMILY_MAX ||
if (call_pkg->nd_family > NVDIMM_BUS_FAMILY_MAX ||
!test_bit(family, &nd_desc->bus_family_mask))
return -EINVAL;
family = array_index_nospec(family,
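
array_index_nospec() (from <linux/nospec.h>) clamps an index to zero under misspeculation, so the idiom is always bounds-check first, then sanitize, then use; the fix above makes the bounds check test the same raw field that is later sanitized. A minimal sketch of the idiom (lookup(), table, and nelems are illustrative names):

#include <linux/nospec.h>
#include <linux/errno.h>

static int lookup(unsigned long idx, const int *table, unsigned long nelems)
{
        if (idx >= nelems)
                return -EINVAL;
        /* Clamp idx so a misspeculated out-of-bounds load cannot occur. */
        idx = array_index_nospec(idx, nelems);
        return table[idx];
}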

View File

@ -265,6 +265,10 @@ static int acpi_processor_get_power_info_fadt(struct acpi_processor *pr)
ACPI_CX_DESC_LEN, "ACPI P_LVL3 IOPORT 0x%x",
pr->power.states[ACPI_STATE_C3].address);
if (!pr->power.states[ACPI_STATE_C2].address &&
!pr->power.states[ACPI_STATE_C3].address)
return -ENODEV;
return 0;
}

View File

@ -442,6 +442,13 @@ static const struct dmi_system_id asus_laptop[] = {
DMI_MATCH(DMI_BOARD_NAME, "B1502CBA"),
},
},
{
/* Asus Vivobook X1404VAP */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
DMI_MATCH(DMI_BOARD_NAME, "X1404VAP"),
},
},
{
/* Asus Vivobook X1504VAP */
.matches = {
@ -556,6 +563,12 @@ static const struct dmi_system_id maingear_laptop[] = {
DMI_MATCH(DMI_BOARD_NAME, "RP-15"),
},
},
{
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Eluktronics Inc."),
DMI_MATCH(DMI_BOARD_NAME, "MECH-17"),
},
},
{
/* TongFang GM6XGxX/TUXEDO Stellaris 16 Gen5 AMD */
.matches = {

View File

@ -897,6 +897,9 @@ static void __device_resume(struct device *dev, pm_message_t state, bool async)
if (dev->power.syscore)
goto Complete;
if (!dev->power.is_suspended)
goto Complete;
if (dev->power.direct_complete) {
/* Match the pm_runtime_disable() in __device_suspend(). */
pm_runtime_enable(dev);
@ -915,9 +918,6 @@ static void __device_resume(struct device *dev, pm_message_t state, bool async)
*/
dev->power.is_prepared = false;
if (!dev->power.is_suspended)
goto Unlock;
if (dev->pm_domain) {
info = "power domain ";
callback = pm_op(&dev->pm_domain->ops, state);
@ -957,7 +957,6 @@ static void __device_resume(struct device *dev, pm_message_t state, bool async)
error = dpm_run_callback(callback, dev, state, info);
dev->power.is_suspended = false;
Unlock:
device_unlock(dev);
dpm_watchdog_clear(&wd);
@ -1239,14 +1238,13 @@ Skip:
dev->power.is_noirq_suspended = true;
/*
* Skipping the resume of devices that were in use right before the
* system suspend (as indicated by their PM-runtime usage counters)
* would be suboptimal. Also resume them if doing that is not allowed
* to be skipped.
* Devices must be resumed unless they are explicitly allowed to be left
* in suspend, but even in that case skipping the resume of devices that
* were in use right before the system suspend (as indicated by their
* runtime PM usage counters and child counters) would be suboptimal.
*/
if (atomic_read(&dev->power.usage_count) > 1 ||
!(dev_pm_test_driver_flags(dev, DPM_FLAG_MAY_SKIP_RESUME) &&
dev->power.may_skip_resume))
if (!(dev_pm_test_driver_flags(dev, DPM_FLAG_MAY_SKIP_RESUME) &&
dev->power.may_skip_resume) || !pm_runtime_need_not_resume(dev))
dev->power.must_resume = true;
if (dev->power.must_resume)
@ -1643,6 +1641,7 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
pm_runtime_disable(dev);
if (pm_runtime_status_suspended(dev)) {
pm_dev_dbg(dev, state, "direct-complete ");
dev->power.is_suspended = true;
goto Complete;
}

View File

@ -1809,7 +1809,7 @@ void pm_runtime_drop_link(struct device_link *link)
pm_request_idle(link->supplier);
}
static bool pm_runtime_need_not_resume(struct device *dev)
bool pm_runtime_need_not_resume(struct device *dev)
{
return atomic_read(&dev->power.usage_count) <= 1 &&
(atomic_read(&dev->power.child_count) == 0 ||

View File

@ -1136,8 +1136,18 @@ static struct clk_regmap g12a_cpu_clk_div16_en = {
.hw.init = &(struct clk_init_data) {
.name = "cpu_clk_div16_en",
.ops = &clk_regmap_gate_ro_ops,
.parent_hws = (const struct clk_hw *[]) {
&g12a_cpu_clk.hw
.parent_data = &(const struct clk_parent_data) {
/*
* Note:
* G12A and G12B have different cpu clocks (with
* different struct clk_hw). We fallback to the global
* naming string mechanism so this clock picks
* up the appropriate one. Same goes for the other
* clock using cpu cluster A clock output and present
* on both G12 variant.
*/
.name = "cpu_clk",
.index = -1,
},
.num_parents = 1,
/*
@ -1202,7 +1212,10 @@ static struct clk_regmap g12a_cpu_clk_apb_div = {
.hw.init = &(struct clk_init_data){
.name = "cpu_clk_apb_div",
.ops = &clk_regmap_divider_ro_ops,
.parent_hws = (const struct clk_hw *[]) { &g12a_cpu_clk.hw },
.parent_data = &(const struct clk_parent_data) {
.name = "cpu_clk",
.index = -1,
},
.num_parents = 1,
},
};
@ -1236,7 +1249,10 @@ static struct clk_regmap g12a_cpu_clk_atb_div = {
.hw.init = &(struct clk_init_data){
.name = "cpu_clk_atb_div",
.ops = &clk_regmap_divider_ro_ops,
.parent_hws = (const struct clk_hw *[]) { &g12a_cpu_clk.hw },
.parent_data = &(const struct clk_parent_data) {
.name = "cpu_clk",
.index = -1,
},
.num_parents = 1,
},
};
@ -1270,7 +1286,10 @@ static struct clk_regmap g12a_cpu_clk_axi_div = {
.hw.init = &(struct clk_init_data){
.name = "cpu_clk_axi_div",
.ops = &clk_regmap_divider_ro_ops,
.parent_hws = (const struct clk_hw *[]) { &g12a_cpu_clk.hw },
.parent_data = &(const struct clk_parent_data) {
.name = "cpu_clk",
.index = -1,
},
.num_parents = 1,
},
};
@ -1305,13 +1324,6 @@ static struct clk_regmap g12a_cpu_clk_trace_div = {
.name = "cpu_clk_trace_div",
.ops = &clk_regmap_divider_ro_ops,
.parent_data = &(const struct clk_parent_data) {
/*
* Note:
* G12A and G12B have different cpu_clks (with
* different struct clk_hw). We fallback to the global
* naming string mechanism so cpu_clk_trace_div picks
* up the appropriate one.
*/
.name = "cpu_clk",
.index = -1,
},
@ -4187,7 +4199,7 @@ static MESON_GATE(g12a_spicc_1, HHI_GCLK_MPEG0, 14);
static MESON_GATE(g12a_hiu_reg, HHI_GCLK_MPEG0, 19);
static MESON_GATE(g12a_mipi_dsi_phy, HHI_GCLK_MPEG0, 20);
static MESON_GATE(g12a_assist_misc, HHI_GCLK_MPEG0, 23);
static MESON_GATE(g12a_emmc_a, HHI_GCLK_MPEG0, 4);
static MESON_GATE(g12a_emmc_a, HHI_GCLK_MPEG0, 24);
static MESON_GATE(g12a_emmc_b, HHI_GCLK_MPEG0, 25);
static MESON_GATE(g12a_emmc_c, HHI_GCLK_MPEG0, 26);
static MESON_GATE(g12a_audio_codec, HHI_GCLK_MPEG0, 28);

View File

@ -1270,14 +1270,13 @@ static struct clk_regmap gxbb_cts_i958 = {
},
};
/*
* This table skips a clock named 'cts_slow_oscin' in the documentation
* This clock does not exist yet in this controller or the AO one
*/
static u32 gxbb_32k_clk_parents_val_table[] = { 0, 2, 3 };
static const struct clk_parent_data gxbb_32k_clk_parent_data[] = {
{ .fw_name = "xtal", },
/*
* FIXME: This clock is provided by the ao clock controller but the
* clock is not yet part of the binding of this controller, so string
* name must be use to set this parent.
*/
{ .name = "cts_slow_oscin", .index = -1 },
{ .hw = &gxbb_fclk_div3.hw },
{ .hw = &gxbb_fclk_div5.hw },
};
@ -1287,6 +1286,7 @@ static struct clk_regmap gxbb_32k_clk_sel = {
.offset = HHI_32K_CLK_CNTL,
.mask = 0x3,
.shift = 16,
.table = gxbb_32k_clk_parents_val_table,
},
.hw.init = &(struct clk_init_data){
.name = "32k_clk_sel",
@ -1310,7 +1310,7 @@ static struct clk_regmap gxbb_32k_clk_div = {
&gxbb_32k_clk_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT | CLK_DIVIDER_ROUND_CLOSEST,
.flags = CLK_SET_RATE_PARENT,
},
};

View File

@ -3771,7 +3771,7 @@ static struct clk_branch gcc_venus0_axi_clk = {
static struct clk_branch gcc_venus0_core0_vcodec0_clk = {
.halt_reg = 0x4c02c,
.halt_check = BRANCH_HALT,
.halt_check = BRANCH_HALT_SKIP,
.clkr = {
.enable_reg = 0x4c02c,
.enable_mask = BIT(0),

View File

@ -2544,7 +2544,7 @@ static struct clk_branch video_core_clk = {
static struct clk_branch video_subcore0_clk = {
.halt_reg = 0x1048,
.halt_check = BRANCH_HALT,
.halt_check = BRANCH_HALT_SKIP,
.clkr = {
.enable_reg = 0x1048,
.enable_mask = BIT(0),

View File

@ -201,7 +201,7 @@ PNAME(mux_aclk_peri_pre_p) = { "cpll_peri",
"gpll_peri",
"hdmiphy_peri" };
PNAME(mux_ref_usb3otg_src_p) = { "xin24m",
"clk_usb3otg_ref" };
"clk_ref_usb3otg_src" };
PNAME(mux_xin24m_32k_p) = { "xin24m",
"clk_rtc32k" };
PNAME(mux_mac2io_src_p) = { "clk_mac2io_src",

View File

@ -64,11 +64,11 @@ struct samsung_clk_provider *__init samsung_clk_init(struct device_node *np,
if (!ctx)
panic("could not allocate clock provider context.\n");
ctx->clk_data.num = nr_clks;
for (i = 0; i < nr_clks; ++i)
ctx->clk_data.hws[i] = ERR_PTR(-ENOENT);
ctx->reg_base = base;
ctx->clk_data.num = nr_clks;
spin_lock_init(&ctx->lock);
return ctx;

View File

@ -20,13 +20,6 @@
DEFINE_RAW_SPINLOCK(i8253_lock);
EXPORT_SYMBOL(i8253_lock);
/*
* Handle PIT quirk in pit_shutdown() where zeroing the counter register
* restarts the PIT, negating the shutdown. On platforms with the quirk,
* platform specific code can set this to false.
*/
bool i8253_clear_counter_on_shutdown __ro_after_init = true;
#ifdef CONFIG_CLKSRC_I8253
/*
* Since the PIT overflows every tick, its not very useful
@ -112,12 +105,33 @@ void clockevent_i8253_disable(void)
{
raw_spin_lock(&i8253_lock);
/*
* Writing the MODE register should stop the counter, according to
* the datasheet. This appears to work on real hardware (well, on
* modern Intel and AMD boxes; I didn't dig the Pegasos out of the
* shed).
*
* However, some virtual implementations differ, and the MODE change
* doesn't have any effect until either the counter is written (KVM
* in-kernel PIT) or the next interrupt (QEMU). And in those cases,
* it may not stop the *count*, only the interrupts. Although in
* the virt case, that probably doesn't matter, as the value of the
* counter will only be calculated on demand if the guest reads it;
* it's the interrupts which cause steal time.
*
* Hyper-V apparently has a bug where even in mode 0, the IRQ keeps
* firing repeatedly if the counter is running. But it *does* do the
* right thing when the MODE register is written.
*
* So: write the MODE and then load the counter, which ensures that
* the IRQ is stopped on those buggy virt implementations. And then
* write the MODE again, which is the right way to stop it.
*/
outb_p(0x30, PIT_MODE);
outb_p(0, PIT_CH0);
outb_p(0, PIT_CH0);
if (i8253_clear_counter_on_shutdown) {
outb_p(0, PIT_CH0);
outb_p(0, PIT_CH0);
}
outb_p(0x30, PIT_MODE);
raw_spin_unlock(&i8253_lock);
}

View File

@ -370,6 +370,25 @@ static int mchp_tc_probe(struct platform_device *pdev)
channel);
}
/* Disable Quadrature Decoder and position measure */
ret = regmap_update_bits(regmap, ATMEL_TC_BMR, ATMEL_TC_QDEN | ATMEL_TC_POSEN, 0);
if (ret)
return ret;
/* Setup the period capture mode */
ret = regmap_update_bits(regmap, ATMEL_TC_REG(priv->channel[0], CMR),
ATMEL_TC_WAVE | ATMEL_TC_ABETRG | ATMEL_TC_CMR_MASK |
ATMEL_TC_TCCLKS,
ATMEL_TC_CMR_MASK);
if (ret)
return ret;
/* Enable clock and trigger counter */
ret = regmap_write(regmap, ATMEL_TC_REG(priv->channel[0], CCR),
ATMEL_TC_CLKEN | ATMEL_TC_SWTRG);
if (ret)
return ret;
priv->tc_cfg = tcb_config;
priv->regmap = regmap;
priv->counter.name = dev_name(&pdev->dev);

View File

@ -59,37 +59,43 @@ static int stm32_lptim_set_enable_state(struct stm32_lptim_cnt *priv,
return 0;
}
ret = clk_enable(priv->clk);
if (ret)
goto disable_cnt;
/* LP timer must be enabled before writing CMP & ARR */
ret = regmap_write(priv->regmap, STM32_LPTIM_ARR, priv->ceiling);
if (ret)
return ret;
goto disable_clk;
ret = regmap_write(priv->regmap, STM32_LPTIM_CMP, 0);
if (ret)
return ret;
goto disable_clk;
/* ensure CMP & ARR registers are properly written */
ret = regmap_read_poll_timeout(priv->regmap, STM32_LPTIM_ISR, val,
(val & STM32_LPTIM_CMPOK_ARROK) == STM32_LPTIM_CMPOK_ARROK,
100, 1000);
if (ret)
return ret;
goto disable_clk;
ret = regmap_write(priv->regmap, STM32_LPTIM_ICR,
STM32_LPTIM_CMPOKCF_ARROKCF);
if (ret)
return ret;
goto disable_clk;
ret = clk_enable(priv->clk);
if (ret) {
regmap_write(priv->regmap, STM32_LPTIM_CR, 0);
return ret;
}
priv->enabled = true;
/* Start LP timer in continuous mode */
return regmap_update_bits(priv->regmap, STM32_LPTIM_CR,
STM32_LPTIM_CNTSTRT, STM32_LPTIM_CNTSTRT);
disable_clk:
clk_disable(priv->clk);
disable_cnt:
regmap_write(priv->regmap, STM32_LPTIM_CR, 0);
return ret;
}
static int stm32_lptim_setup(struct stm32_lptim_cnt *priv, int enable)

View File

@ -145,7 +145,23 @@ unsigned int dbs_update(struct cpufreq_policy *policy)
time_elapsed = update_time - j_cdbs->prev_update_time;
j_cdbs->prev_update_time = update_time;
idle_time = cur_idle_time - j_cdbs->prev_cpu_idle;
/*
* cur_idle_time could be smaller than j_cdbs->prev_cpu_idle if
* it's obtained from get_cpu_idle_time_jiffy() when NOHZ is
* off, where idle_time is calculated by the difference between
* time elapsed in jiffies and "busy time" obtained from CPU
* statistics. If a CPU is 100% busy, the time elapsed and busy
* time should grow with the same amount in two consecutive
* samples, but in practice there could be a tiny difference,
* making the accumulated idle time decrease sometimes. Hence,
* in this case, idle_time should be regarded as 0 in order to
* make the further process correct.
*/
if (cur_idle_time > j_cdbs->prev_cpu_idle)
idle_time = cur_idle_time - j_cdbs->prev_cpu_idle;
else
idle_time = 0;
j_cdbs->prev_cpu_idle = cur_idle_time;
if (ignore_nice) {
@ -162,7 +178,7 @@ unsigned int dbs_update(struct cpufreq_policy *policy)
* calls, so the previous load value can be used then.
*/
load = j_cdbs->prev_load;
} else if (unlikely((int)idle_time > 2 * sampling_rate &&
} else if (unlikely(idle_time > 2 * sampling_rate &&
j_cdbs->prev_load)) {
/*
* If the CPU had gone completely idle and a task has
@ -189,30 +205,15 @@ unsigned int dbs_update(struct cpufreq_policy *policy)
load = j_cdbs->prev_load;
j_cdbs->prev_load = 0;
} else {
if (time_elapsed >= idle_time) {
if (time_elapsed > idle_time)
load = 100 * (time_elapsed - idle_time) / time_elapsed;
} else {
/*
* That can happen if idle_time is returned by
* get_cpu_idle_time_jiffy(). In that case
* idle_time is roughly equal to the difference
* between time_elapsed and "busy time" obtained
* from CPU statistics. Then, the "busy time"
* can end up being greater than time_elapsed
* (for example, if jiffies_64 and the CPU
* statistics are updated by different CPUs),
* so idle_time may in fact be negative. That
* means, though, that the CPU was busy all
* the time (on the rough average) during the
* last sampling interval and 100 can be
* returned as the load.
*/
load = (int)idle_time < 0 ? 100 : 0;
}
else
load = 0;
j_cdbs->prev_load = load;
}
if (unlikely((int)idle_time > 2 * sampling_rate)) {
if (unlikely(idle_time > 2 * sampling_rate)) {
unsigned int periods = idle_time / sampling_rate;
if (periods < idle_periods)
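
The underlying hazard is that idle_time is unsigned: if the sampled idle counter ever appears to move backwards, plain subtraction wraps to a huge value rather than going negative, which is why the hunk clamps at zero. A minimal sketch:

static unsigned long long idle_delta(unsigned long long cur,
                                     unsigned long long prev)
{
        /* Unsigned subtraction wraps, so clamp to 0 when accounting
         * jitter makes the accumulated idle time appear to decrease. */
        return cur > prev ? cur - prev : 0;
}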

View File

@ -47,8 +47,9 @@ static unsigned int scpi_cpufreq_get_rate(unsigned int cpu)
static int
scpi_cpufreq_set_target(struct cpufreq_policy *policy, unsigned int index)
{
u64 rate = policy->freq_table[index].frequency * 1000;
unsigned long freq_khz = policy->freq_table[index].frequency;
struct scpi_data *priv = policy->driver_data;
unsigned long rate = freq_khz * 1000;
int ret;
ret = clk_set_rate(priv->clk, rate);
@ -56,7 +57,7 @@ scpi_cpufreq_set_target(struct cpufreq_policy *policy, unsigned int index)
if (ret)
return ret;
if (clk_get_rate(priv->clk) != rate)
if (clk_get_rate(priv->clk) / 1000 != freq_khz)
return -EIO;
return 0;
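
Comparing the exact Hz value from clk_get_rate() against freq * 1000 can fail spuriously when the platform rounds the programmed rate, so the fix compares at kHz granularity instead. The check reduces to:

/* Accept any rate that matches once both sides are reduced to kHz. */
static int rate_matches_khz(unsigned long actual_hz, unsigned long want_khz)
{
        return actual_hz / 1000 == want_khz;
}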

View File

@ -55,7 +55,6 @@
#define SEC_TYPE_MASK 0x0F
#define SEC_DONE_MASK 0x0001
#define SEC_ICV_MASK 0x000E
#define SEC_SQE_LEN_RATE_MASK 0x3
#define SEC_TOTAL_IV_SZ (SEC_IV_SIZE * QM_Q_DEPTH)
#define SEC_SGL_SGE_NR 128
@ -77,16 +76,16 @@
#define SEC_TOTAL_PBUF_SZ (PAGE_SIZE * SEC_PBUF_PAGE_NUM + \
SEC_PBUF_LEFT_SZ)
#define SEC_SQE_LEN_RATE 4
#define SEC_SQE_CFLAG 2
#define SEC_SQE_AEAD_FLAG 3
#define SEC_SQE_DONE 0x1
#define SEC_ICV_ERR 0x2
#define MIN_MAC_LEN 4
#define MAC_LEN_MASK 0x1U
#define MAX_INPUT_DATA_LEN 0xFFFE00
#define BITS_MASK 0xFF
#define WORD_MASK 0x3
#define BYTE_BITS 0x8
#define BYTES_TO_WORDS(bcount) ((bcount) >> 2)
#define SEC_XTS_NAME_SZ 0x3
#define IV_CM_CAL_NUM 2
#define IV_CL_MASK 0x7
@ -1048,11 +1047,6 @@ static int sec_aead_auth_set_key(struct sec_auth_ctx *ctx,
struct crypto_shash *hash_tfm = ctx->hash_tfm;
int blocksize, digestsize, ret;
if (!keys->authkeylen) {
pr_err("hisi_sec2: aead auth key error!\n");
return -EINVAL;
}
blocksize = crypto_shash_blocksize(hash_tfm);
digestsize = crypto_shash_digestsize(hash_tfm);
if (keys->authkeylen > blocksize) {
@ -1064,7 +1058,8 @@ static int sec_aead_auth_set_key(struct sec_auth_ctx *ctx,
}
ctx->a_key_len = digestsize;
} else {
memcpy(ctx->a_key, keys->authkey, keys->authkeylen);
if (keys->authkeylen)
memcpy(ctx->a_key, keys->authkey, keys->authkeylen);
ctx->a_key_len = keys->authkeylen;
}
@ -1133,7 +1128,7 @@ static int sec_aead_setkey(struct crypto_aead *tfm, const u8 *key,
goto bad_key;
}
if (ctx->a_ctx.a_key_len & SEC_SQE_LEN_RATE_MASK) {
if (ctx->a_ctx.a_key_len & WORD_MASK) {
ret = -EINVAL;
dev_err(dev, "AUTH key length error!\n");
goto bad_key;
@ -1538,11 +1533,10 @@ static void sec_auth_bd_fill_ex(struct sec_auth_ctx *ctx, int dir,
sec_sqe->type2.a_key_addr = cpu_to_le64(ctx->a_key_dma);
sec_sqe->type2.mac_key_alg = cpu_to_le32(authsize / SEC_SQE_LEN_RATE);
sec_sqe->type2.mac_key_alg = cpu_to_le32(BYTES_TO_WORDS(authsize));
sec_sqe->type2.mac_key_alg |=
cpu_to_le32((u32)((ctx->a_key_len) /
SEC_SQE_LEN_RATE) << SEC_AKEY_OFFSET);
cpu_to_le32((u32)BYTES_TO_WORDS(ctx->a_key_len) << SEC_AKEY_OFFSET);
sec_sqe->type2.mac_key_alg |=
cpu_to_le32((u32)(ctx->a_alg) << SEC_AEAD_ALG_OFFSET);
@ -1594,12 +1588,10 @@ static void sec_auth_bd_fill_ex_v3(struct sec_auth_ctx *ctx, int dir,
sqe3->a_key_addr = cpu_to_le64(ctx->a_key_dma);
sqe3->auth_mac_key |=
cpu_to_le32((u32)(authsize /
SEC_SQE_LEN_RATE) << SEC_MAC_OFFSET_V3);
cpu_to_le32(BYTES_TO_WORDS(authsize) << SEC_MAC_OFFSET_V3);
sqe3->auth_mac_key |=
cpu_to_le32((u32)(ctx->a_key_len /
SEC_SQE_LEN_RATE) << SEC_AKEY_OFFSET_V3);
cpu_to_le32((u32)BYTES_TO_WORDS(ctx->a_key_len) << SEC_AKEY_OFFSET_V3);
sqe3->auth_mac_key |=
cpu_to_le32((u32)(ctx->a_alg) << SEC_AUTH_ALG_OFFSET_V3);
@ -2205,8 +2197,8 @@ static int sec_aead_spec_check(struct sec_ctx *ctx, struct sec_req *sreq)
struct device *dev = ctx->dev;
int ret;
/* Hardware does not handle cases where authsize is less than 4 bytes */
if (unlikely(sz < MIN_MAC_LEN)) {
/* Hardware does not handle cases where authsize is not 4 bytes aligned */
if (c_mode == SEC_CMODE_CBC && (sz & WORD_MASK)) {
sreq->aead_req.fallback = true;
return -EINVAL;
}

View File

@ -1142,6 +1142,7 @@ static void __init nxcop_get_capabilities(void)
{
struct hv_vas_all_caps *hv_caps;
struct hv_nx_cop_caps *hv_nxc;
u64 feat;
int rc;
hv_caps = kmalloc(sizeof(*hv_caps), GFP_KERNEL);
@ -1152,27 +1153,26 @@ static void __init nxcop_get_capabilities(void)
*/
rc = h_query_vas_capabilities(H_QUERY_NX_CAPABILITIES, 0,
(u64)virt_to_phys(hv_caps));
if (!rc)
feat = be64_to_cpu(hv_caps->feat_type);
kfree(hv_caps);
if (rc)
goto out;
return;
if (!(feat & VAS_NX_GZIP_FEAT_BIT))
return;
caps_feat = be64_to_cpu(hv_caps->feat_type);
/*
* NX-GZIP feature available
*/
if (caps_feat & VAS_NX_GZIP_FEAT_BIT) {
hv_nxc = kmalloc(sizeof(*hv_nxc), GFP_KERNEL);
if (!hv_nxc)
goto out;
/*
* Get capabilities for NX-GZIP feature
*/
rc = h_query_vas_capabilities(H_QUERY_NX_CAPABILITIES,
VAS_NX_GZIP_FEAT,
(u64)virt_to_phys(hv_nxc));
} else {
pr_err("NX-GZIP feature is not available\n");
rc = -EINVAL;
}
hv_nxc = kmalloc(sizeof(*hv_nxc), GFP_KERNEL);
if (!hv_nxc)
return;
/*
* Get capabilities for NX-GZIP feature
*/
rc = h_query_vas_capabilities(H_QUERY_NX_CAPABILITIES,
VAS_NX_GZIP_FEAT,
(u64)virt_to_phys(hv_nxc));
if (!rc) {
nx_cop_caps.descriptor = be64_to_cpu(hv_nxc->descriptor);
@ -1182,13 +1182,10 @@ static void __init nxcop_get_capabilities(void)
be64_to_cpu(hv_nxc->min_compress_len);
nx_cop_caps.min_decompress_len =
be64_to_cpu(hv_nxc->min_decompress_len);
} else {
caps_feat = 0;
caps_feat = feat;
}
kfree(hv_nxc);
out:
kfree(hv_caps);
}
static const struct vio_device_id nx842_vio_driver_ids[] = {

View File

@ -83,8 +83,6 @@
(((did) & PCI_DEVICE_ID_INTEL_IE31200_HB_CFL_MASK) == \
PCI_DEVICE_ID_INTEL_IE31200_HB_CFL_MASK))
#define IE31200_DIMMS 4
#define IE31200_RANKS 8
#define IE31200_RANKS_PER_CHANNEL 4
#define IE31200_DIMMS_PER_CHANNEL 2
#define IE31200_CHANNELS 2
@ -156,6 +154,7 @@
#define IE31200_MAD_DIMM_0_OFFSET 0x5004
#define IE31200_MAD_DIMM_0_OFFSET_SKL 0x500C
#define IE31200_MAD_DIMM_SIZE GENMASK_ULL(7, 0)
#define IE31200_MAD_DIMM_SIZE_SKL GENMASK_ULL(5, 0)
#define IE31200_MAD_DIMM_A_RANK BIT(17)
#define IE31200_MAD_DIMM_A_RANK_SHIFT 17
#define IE31200_MAD_DIMM_A_RANK_SKL BIT(10)
@ -369,7 +368,7 @@ static void __iomem *ie31200_map_mchbar(struct pci_dev *pdev)
static void __skl_populate_dimm_info(struct dimm_data *dd, u32 addr_decode,
int chan)
{
dd->size = (addr_decode >> (chan << 4)) & IE31200_MAD_DIMM_SIZE;
dd->size = (addr_decode >> (chan << 4)) & IE31200_MAD_DIMM_SIZE_SKL;
dd->dual_rank = (addr_decode & (IE31200_MAD_DIMM_A_RANK_SKL << (chan << 4))) ? 1 : 0;
dd->x16_width = ((addr_decode & (IE31200_MAD_DIMM_A_WIDTH_SKL << (chan << 4))) >>
(IE31200_MAD_DIMM_A_WIDTH_SKL_SHIFT + (chan << 4)));
@ -418,7 +417,7 @@ static int ie31200_probe1(struct pci_dev *pdev, int dev_idx)
nr_channels = how_many_channels(pdev);
layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
layers[0].size = IE31200_DIMMS;
layers[0].size = IE31200_RANKS_PER_CHANNEL;
layers[0].is_virt_csrow = true;
layers[1].type = EDAC_MC_LAYER_CHANNEL;
layers[1].size = nr_channels;
@ -608,7 +607,7 @@ static int __init ie31200_init(void)
pci_rc = pci_register_driver(&ie31200_driver);
if (pci_rc < 0)
goto fail0;
return pci_rc;
if (!mci_pdev) {
ie31200_registered = 0;
@ -619,11 +618,13 @@ static int __init ie31200_init(void)
if (mci_pdev)
break;
}
if (!mci_pdev) {
edac_dbg(0, "ie31200 pci_get_device fail\n");
pci_rc = -ENODEV;
goto fail1;
goto fail0;
}
pci_rc = ie31200_init_one(mci_pdev, &ie31200_pci_tbl[i]);
if (pci_rc < 0) {
edac_dbg(0, "ie31200 init fail\n");
@ -631,12 +632,12 @@ static int __init ie31200_init(void)
goto fail1;
}
}
return 0;
return 0;
fail1:
pci_unregister_driver(&ie31200_driver);
fail0:
pci_dev_put(mci_pdev);
fail0:
pci_unregister_driver(&ie31200_driver);
return pci_rc;
}

View File

@ -279,6 +279,7 @@ static int imx_scu_probe(struct platform_device *pdev)
return ret;
sc_ipc->fast_ipc = of_device_is_compatible(args.np, "fsl,imx8-mu-scu");
of_node_put(args.np);
num_channel = sc_ipc->fast_ipc ? 2 : SCU_MU_CHAN_NUM;
for (i = 0; i < num_channel; i++) {

View File

@ -310,7 +310,10 @@ static ssize_t ibft_attr_show_nic(void *data, int type, char *buf)
str += sprintf_ipaddr(str, nic->ip_addr);
break;
case ISCSI_BOOT_ETH_SUBNET_MASK:
val = cpu_to_be32(~((1 << (32-nic->subnet_mask_prefix))-1));
if (nic->subnet_mask_prefix > 32)
val = cpu_to_be32(~0);
else
val = cpu_to_be32(~((1 << (32-nic->subnet_mask_prefix))-1));
str += sprintf(str, "%pI4", &val);
break;
case ISCSI_BOOT_ETH_PREFIX_LEN:
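
Shifting a 32-bit value by 32 is undefined behaviour in C, and an out-of-range prefix supplied by firmware hits exactly that in the old expression; the fix special-cases it. A standalone sketch in host byte order (the kernel additionally converts with cpu_to_be32()):

#include <stdint.h>

static uint32_t prefix_to_mask(unsigned int prefix)
{
        if (prefix == 0)
                return 0;               /* avoid the undefined 1u << 32 */
        if (prefix >= 32)
                return 0xFFFFFFFFu;     /* /32, or a bogus larger value */
        return ~((1u << (32 - prefix)) - 1);
}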

View File

@ -2294,7 +2294,6 @@ static int amdgpu_pmops_freeze(struct device *dev)
adev->in_s4 = true;
r = amdgpu_device_suspend(drm_dev, true);
adev->in_s4 = false;
if (r)
return r;
return amdgpu_asic_reset(adev);
@ -2303,8 +2302,13 @@ static int amdgpu_pmops_freeze(struct device *dev)
static int amdgpu_pmops_thaw(struct device *dev)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(drm_dev);
int r;
return amdgpu_device_resume(drm_dev, true);
r = amdgpu_device_resume(drm_dev, true);
adev->in_s4 = false;
return r;
}
static int amdgpu_pmops_poweroff(struct device *dev)
@ -2317,6 +2321,9 @@ static int amdgpu_pmops_poweroff(struct device *dev)
static int amdgpu_pmops_restore(struct device *dev)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(drm_dev);
adev->in_s4 = false;
return amdgpu_device_resume(drm_dev, true);
}

View File

@ -87,7 +87,7 @@ static const struct amdgpu_video_codec_info nv_video_codecs_decode_array[] =
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4906, 52)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4906, 4)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 8192, 8192, 0)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
};

View File

@ -123,7 +123,7 @@ static const struct amdgpu_video_codec_info rv_video_codecs_decode_array[] =
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4906, 52)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4906, 4)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 4096, 186)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 8192, 8192, 0)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 4096, 4096, 0)},
};

View File

@ -218,6 +218,10 @@ amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);
static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector);
static void amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
int bl_idx,
u32 user_brightness);
static bool
is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
struct drm_crtc_state *new_crtc_state);
@ -2698,8 +2702,19 @@ static int dm_resume(void *handle)
mutex_unlock(&dm->dc_lock);
/* set the backlight after a reset */
for (i = 0; i < dm->num_of_edps; i++) {
if (dm->backlight_dev[i])
amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]);
}
return 0;
}
/* leave display off for S4 sequence */
if (adev->in_s4)
return 0;
/* Recreate dc_state - DC invalidates it when setting power state to S3. */
dc_release_state(dm_state->context);
dm_state->context = dc_create_state(dm->dc);

View File

@ -396,6 +396,7 @@ void hdcp_destroy(struct kobject *kobj, struct hdcp_workqueue *hdcp_work)
for (i = 0; i < hdcp_work->max_link; i++) {
cancel_delayed_work_sync(&hdcp_work[i].callback_dwork);
cancel_delayed_work_sync(&hdcp_work[i].watchdog_timer_dwork);
cancel_delayed_work_sync(&hdcp_work[i].property_validate_dwork);
}
sysfs_remove_bin_file(kobj, &hdcp_work[0].attr);

View File

@ -1025,6 +1025,16 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
bool res = false;
DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger);
/* Invalid input */
if (!plane_state ||
!plane_state->dst_rect.width ||
!plane_state->dst_rect.height ||
!plane_state->src_rect.width ||
!plane_state->src_rect.height) {
ASSERT(0);
return false;
}
pipe_ctx->plane_res.scl_data.format = convert_pixel_format_to_dalsurface(
pipe_ctx->plane_state->format);
@ -1901,10 +1911,13 @@ static int get_norm_pix_clk(const struct dc_crtc_timing *timing)
break;
case COLOR_DEPTH_121212:
normalized_pix_clk = (pix_clk * 36) / 24;
break;
break;
case COLOR_DEPTH_141414:
normalized_pix_clk = (pix_clk * 42) / 24;
break;
case COLOR_DEPTH_161616:
normalized_pix_clk = (pix_clk * 48) / 24;
break;
break;
default:
ASSERT(0);
break;

View File

@ -283,10 +283,10 @@ static void CalculateDynamicMetadataParameters(
double DISPCLK,
double DCFClkDeepSleep,
double PixelClock,
long HTotal,
long VBlank,
long DynamicMetadataTransmittedBytes,
long DynamicMetadataLinesBeforeActiveRequired,
unsigned int HTotal,
unsigned int VBlank,
unsigned int DynamicMetadataTransmittedBytes,
int DynamicMetadataLinesBeforeActiveRequired,
int InterlaceEnable,
bool ProgressiveToInterlaceUnitInOPP,
double *Tsetup,
@ -3375,8 +3375,8 @@ static double CalculateWriteBackDelay(
static void CalculateDynamicMetadataParameters(int MaxInterDCNTileRepeaters, double DPPCLK, double DISPCLK,
double DCFClkDeepSleep, double PixelClock, long HTotal, long VBlank, long DynamicMetadataTransmittedBytes,
long DynamicMetadataLinesBeforeActiveRequired, int InterlaceEnable, bool ProgressiveToInterlaceUnitInOPP,
double DCFClkDeepSleep, double PixelClock, unsigned int HTotal, unsigned int VBlank, unsigned int DynamicMetadataTransmittedBytes,
int DynamicMetadataLinesBeforeActiveRequired, int InterlaceEnable, bool ProgressiveToInterlaceUnitInOPP,
double *Tsetup, double *Tdmbf, double *Tdmec, double *Tdmsks)
{
double TotalRepeaterDelayTime = 0;

View File

@ -867,11 +867,30 @@ static unsigned int CursorBppEnumToBits(enum cursor_bpp ebpp)
}
}
static unsigned int get_pipe_idx(struct display_mode_lib *mode_lib, unsigned int plane_idx)
{
int pipe_idx = -1;
int i;
ASSERT(plane_idx < DC__NUM_DPP__MAX);
for (i = 0; i < DC__NUM_DPP__MAX ; i++) {
if (plane_idx == mode_lib->vba.pipe_plane[i]) {
pipe_idx = i;
break;
}
}
ASSERT(pipe_idx >= 0);
return pipe_idx;
}
void ModeSupportAndSystemConfiguration(struct display_mode_lib *mode_lib)
{
soc_bounding_box_st *soc = &mode_lib->vba.soc;
unsigned int k;
unsigned int total_pipes = 0;
unsigned int pipe_idx = 0;
mode_lib->vba.VoltageLevel = mode_lib->vba.cache_pipes[0].clks_cfg.voltage;
mode_lib->vba.ReturnBW = mode_lib->vba.ReturnBWPerState[mode_lib->vba.VoltageLevel][mode_lib->vba.maxMpcComb];
@ -892,6 +911,11 @@ void ModeSupportAndSystemConfiguration(struct display_mode_lib *mode_lib)
// Total Available Pipes Support Check
for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
pipe_idx = get_pipe_idx(mode_lib, k);
if (pipe_idx == -1) {
ASSERT(0);
continue; // skip inactive planes
}
total_pipes += mode_lib->vba.DPPPerPlane[k];
}
ASSERT(total_pipes <= DC__NUM_DPP__MAX);

View File

@ -1231,19 +1231,22 @@ static int navi10_get_current_clk_freq_by_table(struct smu_context *smu,
value);
}
static bool navi10_is_support_fine_grained_dpm(struct smu_context *smu, enum smu_clk_type clk_type)
static int navi10_is_support_fine_grained_dpm(struct smu_context *smu, enum smu_clk_type clk_type)
{
PPTable_t *pptable = smu->smu_table.driver_pptable;
DpmDescriptor_t *dpm_desc = NULL;
uint32_t clk_index = 0;
int clk_index = 0;
clk_index = smu_cmn_to_asic_specific_index(smu,
CMN2ASIC_MAPPING_CLK,
clk_type);
if (clk_index < 0)
return clk_index;
dpm_desc = &pptable->DpmDescriptor[clk_index];
/* 0 - Fine grained DPM, 1 - Discrete DPM */
return dpm_desc->SnapToDiscrete == 0;
return dpm_desc->SnapToDiscrete == 0 ? 1 : 0;
}
static inline bool navi10_od_feature_is_supported(struct smu_11_0_overdrive_table *od_table, enum SMU_11_0_ODFEATURE_CAP cap)
@ -1299,7 +1302,11 @@ static int navi10_print_clk_levels(struct smu_context *smu,
if (ret)
return size;
if (!navi10_is_support_fine_grained_dpm(smu, clk_type)) {
ret = navi10_is_support_fine_grained_dpm(smu, clk_type);
if (ret < 0)
return ret;
if (!ret) {
for (i = 0; i < count; i++) {
ret = smu_v11_0_get_dpm_freq_by_index(smu, clk_type, i, &value);
if (ret)
@ -1468,7 +1475,11 @@ static int navi10_force_clk_levels(struct smu_context *smu,
case SMU_UCLK:
case SMU_FCLK:
/* There is only 2 levels for fine grained DPM */
if (navi10_is_support_fine_grained_dpm(smu, clk_type)) {
ret = navi10_is_support_fine_grained_dpm(smu, clk_type);
if (ret < 0)
return ret;
if (ret) {
soft_max_level = (soft_max_level >= 1 ? 1 : 0);
soft_min_level = (soft_min_level >= 1 ? 1 : 0);
}

View File

@ -424,6 +424,7 @@ static int ti_sn65dsi86_add_aux_device(struct ti_sn65dsi86 *pdata,
const char *name)
{
struct device *dev = pdata->dev;
const struct i2c_client *client = to_i2c_client(dev);
struct auxiliary_device *aux;
int ret;
@ -432,6 +433,7 @@ static int ti_sn65dsi86_add_aux_device(struct ti_sn65dsi86 *pdata,
return -ENOMEM;
aux->name = name;
aux->id = (client->adapter->nr << 10) | client->addr;
aux->dev.parent = dev;
aux->dev.release = ti_sn65dsi86_aux_device_release;
device_set_of_node_from_dev(&aux->dev, dev);

View File

@ -964,6 +964,10 @@ int drm_atomic_connector_commit_dpms(struct drm_atomic_state *state,
if (mode != DRM_MODE_DPMS_ON)
mode = DRM_MODE_DPMS_OFF;
if (connector->dpms == mode)
goto out;
connector->dpms = mode;
crtc = connector->state->crtc;

View File

@ -980,6 +980,10 @@ static const struct drm_prop_enum_list dp_colorspaces[] = {
* callback. For atomic drivers the remapping to the "ACTIVE" property is
* implemented in the DRM core.
*
* On atomic drivers any DPMS setproperty ioctl where the value does not
* change is completely skipped, otherwise a full atomic commit will occur.
* On legacy drivers the exact behavior is driver specific.
*
* Note that this property cannot be set through the MODE_ATOMIC ioctl,
* userspace must use "ACTIVE" on the CRTC instead.
*

View File

@ -178,13 +178,13 @@ static int
drm_dp_mst_rad_to_str(const u8 rad[8], u8 lct, char *out, size_t len)
{
int i;
u8 unpacked_rad[16];
u8 unpacked_rad[16] = {};
for (i = 0; i < lct; i++) {
for (i = 1; i < lct; i++) {
if (i % 2)
unpacked_rad[i] = rad[i / 2] >> 4;
unpacked_rad[i] = rad[(i - 1) / 2] >> 4;
else
unpacked_rad[i] = rad[i / 2] & BIT_MASK(4);
unpacked_rad[i] = rad[(i - 1) / 2] & 0xF;
}
/* TODO: Eventually add something to printk so we can format the rad
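
As the corrected loop implies, the first hop of an MST Relative ADdress carries no port number, so the packed nibbles in rad[] describe hops 1 through lct - 1, high nibble first; hence the (i - 1) / 2 byte index and the 0xF mask. A sketch mirroring the fixed code (callers zero-initialize out[], as above):

static void unpack_rad(const unsigned char rad[8], int lct,
                       unsigned char out[16])
{
        int i;

        for (i = 1; i < lct; i++) {
                if (i % 2)      /* odd hop: high nibble of the packed byte */
                        out[i] = rad[(i - 1) / 2] >> 4;
                else            /* even hop: low nibble */
                        out[i] = rad[(i - 1) / 2] & 0xF;
        }
}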

View File

@ -280,6 +280,11 @@ static void mid_get_vbt_data(struct drm_psb_private *dev_priv)
0, PCI_DEVFN(2, 0));
int ret = -1;
if (pci_gfx_root == NULL) {
WARN_ON(1);
return;
}
/* Get the address of the platform config vbt */
pci_read_config_dword(pci_gfx_root, 0xFC, &addr);
pci_dev_put(pci_gfx_root);

View File

@ -920,12 +920,12 @@ static ssize_t mtk_dsi_host_transfer(struct mipi_dsi_host *host,
const struct mipi_dsi_msg *msg)
{
struct mtk_dsi *dsi = host_to_dsi(host);
u32 recv_cnt, i;
ssize_t recv_cnt;
u8 read_data[16];
void *src_addr;
u8 irq_flag = CMD_DONE_INT_FLAG;
u32 dsi_mode;
int ret;
int ret, i;
dsi_mode = readl(dsi->regs + DSI_MODE_CTRL);
if (dsi_mode & MODE) {
@ -974,7 +974,7 @@ static ssize_t mtk_dsi_host_transfer(struct mipi_dsi_host *host,
if (recv_cnt)
memcpy(msg->rx_buf, src_addr, recv_cnt);
DRM_INFO("dsi get %d byte data from the panel address(0x%x)\n",
DRM_INFO("dsi get %zd byte data from the panel address(0x%x)\n",
recv_cnt, *((u8 *)(msg->tx_buf)));
restore_dsi_mode:

View File

@ -138,7 +138,7 @@ enum hdmi_aud_channel_swap_type {
struct hdmi_audio_param {
enum hdmi_audio_coding_type aud_codec;
enum hdmi_audio_sample_size aud_sampe_size;
enum hdmi_audio_sample_size aud_sample_size;
enum hdmi_aud_input_type aud_input_type;
enum hdmi_aud_i2s_fmt aud_i2s_fmt;
enum hdmi_aud_mclk aud_mclk;
@ -174,6 +174,7 @@ struct mtk_hdmi {
unsigned int sys_offset;
void __iomem *regs;
enum hdmi_colorspace csp;
struct platform_device *audio_pdev;
struct hdmi_audio_param aud_param;
bool audio_enable;
bool powered;
@ -1075,7 +1076,7 @@ static int mtk_hdmi_output_init(struct mtk_hdmi *hdmi)
hdmi->csp = HDMI_COLORSPACE_RGB;
aud_param->aud_codec = HDMI_AUDIO_CODING_TYPE_PCM;
aud_param->aud_sampe_size = HDMI_AUDIO_SAMPLE_SIZE_16;
aud_param->aud_sample_size = HDMI_AUDIO_SAMPLE_SIZE_16;
aud_param->aud_input_type = HDMI_AUD_INPUT_I2S;
aud_param->aud_i2s_fmt = HDMI_I2S_MODE_I2S_24BIT;
aud_param->aud_mclk = HDMI_AUD_MCLK_128FS;
@ -1576,14 +1577,14 @@ static int mtk_hdmi_audio_hw_params(struct device *dev, void *data,
switch (daifmt->fmt) {
case HDMI_I2S:
hdmi_params.aud_codec = HDMI_AUDIO_CODING_TYPE_PCM;
hdmi_params.aud_sampe_size = HDMI_AUDIO_SAMPLE_SIZE_16;
hdmi_params.aud_sample_size = HDMI_AUDIO_SAMPLE_SIZE_16;
hdmi_params.aud_input_type = HDMI_AUD_INPUT_I2S;
hdmi_params.aud_i2s_fmt = HDMI_I2S_MODE_I2S_24BIT;
hdmi_params.aud_mclk = HDMI_AUD_MCLK_128FS;
break;
case HDMI_SPDIF:
hdmi_params.aud_codec = HDMI_AUDIO_CODING_TYPE_PCM;
hdmi_params.aud_sampe_size = HDMI_AUDIO_SAMPLE_SIZE_16;
hdmi_params.aud_sample_size = HDMI_AUDIO_SAMPLE_SIZE_16;
hdmi_params.aud_input_type = HDMI_AUD_INPUT_SPDIF;
break;
default:
@ -1667,6 +1668,11 @@ static const struct hdmi_codec_ops mtk_hdmi_audio_codec_ops = {
.no_capture_mute = 1,
};
static void mtk_hdmi_unregister_audio_driver(void *data)
{
platform_device_unregister(data);
}
static int mtk_hdmi_register_audio_driver(struct device *dev)
{
struct mtk_hdmi *hdmi = dev_get_drvdata(dev);
@ -1676,13 +1682,20 @@ static int mtk_hdmi_register_audio_driver(struct device *dev)
.i2s = 1,
.data = hdmi,
};
struct platform_device *pdev;
int ret;
pdev = platform_device_register_data(dev, HDMI_CODEC_DRV_NAME,
PLATFORM_DEVID_AUTO, &codec_data,
sizeof(codec_data));
if (IS_ERR(pdev))
return PTR_ERR(pdev);
hdmi->audio_pdev = platform_device_register_data(dev,
HDMI_CODEC_DRV_NAME,
PLATFORM_DEVID_AUTO,
&codec_data,
sizeof(codec_data));
if (IS_ERR(hdmi->audio_pdev))
return PTR_ERR(hdmi->audio_pdev);
ret = devm_add_action_or_reset(dev, mtk_hdmi_unregister_audio_driver,
hdmi->audio_pdev);
if (ret)
return ret;
DRM_INFO("%s driver bound to HDMI\n", HDMI_CODEC_DRV_NAME);
return 0;

View File

@ -754,7 +754,6 @@ nouveau_connector_force(struct drm_connector *connector)
if (!nv_encoder) {
NV_ERROR(drm, "can't find encoder to force %s on!\n",
connector->name);
connector->status = connector_status_disconnected;
return;
}

View File

@ -557,7 +557,7 @@ int radeon_vce_cs_parse(struct radeon_cs_parser *p)
{
int session_idx = -1;
bool destroyed = false, created = false, allocated = false;
uint32_t tmp, handle = 0;
uint32_t tmp = 0, handle = 0;
uint32_t *size = &tmp;
int i, r = 0;

View File

@ -202,11 +202,15 @@ v3d_tfu_job_run(struct drm_sched_job *sched_job)
struct drm_device *dev = &v3d->drm;
struct dma_fence *fence;
if (unlikely(job->base.base.s_fence->finished.error))
return NULL;
v3d->tfu_job = job;
fence = v3d_fence_create(v3d, V3D_TFU);
if (IS_ERR(fence))
return NULL;
v3d->tfu_job = job;
if (job->base.irq_fence)
dma_fence_put(job->base.irq_fence);
job->base.irq_fence = dma_fence_get(fence);
@ -240,6 +244,9 @@ v3d_csd_job_run(struct drm_sched_job *sched_job)
struct dma_fence *fence;
int i;
if (unlikely(job->base.base.s_fence->finished.error))
return NULL;
v3d->csd_job = job;
v3d_invalidate_caches(v3d);

View File

@ -226,17 +226,19 @@ static int __init vkms_init(void)
if (!config)
return -ENOMEM;
default_config = config;
config->cursor = enable_cursor;
config->writeback = enable_writeback;
config->overlay = enable_overlay;
ret = vkms_create(config);
if (ret)
if (ret) {
kfree(config);
return ret;
}
return ret;
default_config = config;
return 0;
}
static void vkms_destroy(struct vkms_config *config)
@ -260,9 +262,10 @@ static void vkms_destroy(struct vkms_config *config)
static void __exit vkms_exit(void)
{
if (default_config->dev)
vkms_destroy(default_config);
if (!default_config)
return;
vkms_destroy(default_config);
kfree(default_config);
}

View File

@ -204,6 +204,8 @@ static int zynqmp_dpsub_probe(struct platform_device *pdev)
if (ret)
return ret;
dma_set_max_seg_size(&pdev->dev, DMA_BIT_MASK(32));
/* Try the reserved memory. Proceed if there's none. */
of_reserved_mem_device_init(&pdev->dev);

View File

@ -144,7 +144,6 @@ obj-$(CONFIG_USB_KBD) += usbhid/
obj-$(CONFIG_I2C_HID_CORE) += i2c-hid/
obj-$(CONFIG_INTEL_ISH_HID) += intel-ish-hid/
obj-$(INTEL_ISH_FIRMWARE_DOWNLOADER) += intel-ish-hid/
obj-$(CONFIG_AMD_SFH_HID) += amd-sfh-hid/

View File

@ -1037,6 +1037,7 @@
#define USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3001 0x3001
#define USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3003 0x3003
#define USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3008 0x3008
#define USB_DEVICE_ID_QUANTA_HP_5MP_CAMERA_5473 0x5473
#define I2C_VENDOR_ID_RAYDIUM 0x2386
#define I2C_PRODUCT_ID_RAYDIUM_4B33 0x4b33

View File

@ -6,9 +6,6 @@
* Copyright (c) 2015-2018 Terry Junge <terry.junge@plantronics.com>
*/
/*
*/
#include "hid-ids.h"
#include <linux/hid.h>
@ -23,30 +20,28 @@
#define PLT_VOL_UP 0x00b1
#define PLT_VOL_DOWN 0x00b2
#define PLT_MIC_MUTE 0x00b5
#define PLT1_VOL_UP (PLT_HID_1_0_PAGE | PLT_VOL_UP)
#define PLT1_VOL_DOWN (PLT_HID_1_0_PAGE | PLT_VOL_DOWN)
#define PLT1_MIC_MUTE (PLT_HID_1_0_PAGE | PLT_MIC_MUTE)
#define PLT2_VOL_UP (PLT_HID_2_0_PAGE | PLT_VOL_UP)
#define PLT2_VOL_DOWN (PLT_HID_2_0_PAGE | PLT_VOL_DOWN)
#define PLT2_MIC_MUTE (PLT_HID_2_0_PAGE | PLT_MIC_MUTE)
#define HID_TELEPHONY_MUTE (HID_UP_TELEPHONY | 0x2f)
#define HID_CONSUMER_MUTE (HID_UP_CONSUMER | 0xe2)
#define PLT_DA60 0xda60
#define PLT_BT300_MIN 0x0413
#define PLT_BT300_MAX 0x0418
#define PLT_ALLOW_CONSUMER (field->application == HID_CP_CONSUMERCONTROL && \
(usage->hid & HID_USAGE_PAGE) == HID_UP_CONSUMER)
#define PLT_QUIRK_DOUBLE_VOLUME_KEYS BIT(0)
#define PLT_QUIRK_FOLLOWED_OPPOSITE_VOLUME_KEYS BIT(1)
#define PLT_DOUBLE_KEY_TIMEOUT 5 /* ms */
#define PLT_FOLLOWED_OPPOSITE_KEY_TIMEOUT 220 /* ms */
struct plt_drv_data {
unsigned long device_type;
unsigned long last_volume_key_ts;
u32 quirks;
unsigned long last_key_ts;
unsigned long double_key_to;
__u16 last_key;
};
static int plantronics_input_mapping(struct hid_device *hdev,
@ -58,34 +53,43 @@ static int plantronics_input_mapping(struct hid_device *hdev,
unsigned short mapped_key;
struct plt_drv_data *drv_data = hid_get_drvdata(hdev);
unsigned long plt_type = drv_data->device_type;
int allow_mute = usage->hid == HID_TELEPHONY_MUTE;
int allow_consumer = field->application == HID_CP_CONSUMERCONTROL &&
(usage->hid & HID_USAGE_PAGE) == HID_UP_CONSUMER &&
usage->hid != HID_CONSUMER_MUTE;
/* special case for PTT products */
if (field->application == HID_GD_JOYSTICK)
goto defaulted;
/* handle volume up/down mapping */
/* non-standard types or multi-HID interfaces - plt_type is PID */
if (!(plt_type & HID_USAGE_PAGE)) {
switch (plt_type) {
case PLT_DA60:
if (PLT_ALLOW_CONSUMER)
if (allow_consumer)
goto defaulted;
goto ignored;
if (usage->hid == HID_CONSUMER_MUTE) {
mapped_key = KEY_MICMUTE;
goto mapped;
}
break;
default:
if (PLT_ALLOW_CONSUMER)
if (allow_consumer || allow_mute)
goto defaulted;
}
goto ignored;
}
/* handle standard types - plt_type is 0xffa0uuuu or 0xffa2uuuu */
/* 'basic telephony compliant' - allow default consumer page map */
else if ((plt_type & HID_USAGE) >= PLT_BASIC_TELEPHONY &&
(plt_type & HID_USAGE) != PLT_BASIC_EXCEPTION) {
if (PLT_ALLOW_CONSUMER)
goto defaulted;
}
/* not 'basic telephony' - apply legacy mapping */
/* only map if the field is in the device's primary vendor page */
else if (!((field->application ^ plt_type) & HID_USAGE_PAGE)) {
/* handle standard consumer control mapping */
/* and standard telephony mic mute mapping */
if (allow_consumer || allow_mute)
goto defaulted;
/* handle vendor unique types - plt_type is 0xffa0uuuu or 0xffa2uuuu */
/* if not 'basic telephony compliant' - map vendor unique controls */
if (!((plt_type & HID_USAGE) >= PLT_BASIC_TELEPHONY &&
(plt_type & HID_USAGE) != PLT_BASIC_EXCEPTION) &&
!((field->application ^ plt_type) & HID_USAGE_PAGE))
switch (usage->hid) {
case PLT1_VOL_UP:
case PLT2_VOL_UP:
@ -95,8 +99,11 @@ static int plantronics_input_mapping(struct hid_device *hdev,
case PLT2_VOL_DOWN:
mapped_key = KEY_VOLUMEDOWN;
goto mapped;
case PLT1_MIC_MUTE:
case PLT2_MIC_MUTE:
mapped_key = KEY_MICMUTE;
goto mapped;
}
}
/*
* Future mapping of call control or other usages,
@ -105,6 +112,8 @@ static int plantronics_input_mapping(struct hid_device *hdev,
*/
ignored:
hid_dbg(hdev, "usage: %08x (appl: %08x) - ignored\n",
usage->hid, field->application);
return -1;
defaulted:
@ -123,38 +132,26 @@ static int plantronics_event(struct hid_device *hdev, struct hid_field *field,
struct hid_usage *usage, __s32 value)
{
struct plt_drv_data *drv_data = hid_get_drvdata(hdev);
unsigned long prev_tsto, cur_ts;
__u16 prev_key, cur_key;
if (drv_data->quirks & PLT_QUIRK_DOUBLE_VOLUME_KEYS) {
unsigned long prev_ts, cur_ts;
/* Usages are filtered in plantronics_usages. */
/* HZ too low for ms resolution - double key detection disabled */
/* or it is a key release - handle key presses only. */
if (!drv_data->double_key_to || !value)
return 0;
if (!value) /* Handle key presses only. */
return 0;
prev_tsto = drv_data->last_key_ts + drv_data->double_key_to;
cur_ts = drv_data->last_key_ts = jiffies;
prev_key = drv_data->last_key;
cur_key = drv_data->last_key = usage->code;
prev_ts = drv_data->last_volume_key_ts;
cur_ts = jiffies;
if (jiffies_to_msecs(cur_ts - prev_ts) <= PLT_DOUBLE_KEY_TIMEOUT)
return 1; /* Ignore the repeated key. */
drv_data->last_volume_key_ts = cur_ts;
/* If the same key occurs in <= double_key_to -- ignore it */
if (prev_key == cur_key && time_before_eq(cur_ts, prev_tsto)) {
hid_dbg(hdev, "double key %d ignored\n", cur_key);
return 1; /* Ignore the repeated key. */
}
if (drv_data->quirks & PLT_QUIRK_FOLLOWED_OPPOSITE_VOLUME_KEYS) {
unsigned long prev_ts, cur_ts;
/* Usages are filtered in plantronics_usages. */
if (!value) /* Handle key presses only. */
return 0;
prev_ts = drv_data->last_volume_key_ts;
cur_ts = jiffies;
if (jiffies_to_msecs(cur_ts - prev_ts) <= PLT_FOLLOWED_OPPOSITE_KEY_TIMEOUT)
return 1; /* Ignore the followed opposite volume key. */
drv_data->last_volume_key_ts = cur_ts;
}
return 0;
}
@ -196,12 +193,16 @@ static int plantronics_probe(struct hid_device *hdev,
ret = hid_parse(hdev);
if (ret) {
hid_err(hdev, "parse failed\n");
goto err;
return ret;
}
drv_data->device_type = plantronics_device_type(hdev);
drv_data->quirks = id->driver_data;
drv_data->last_volume_key_ts = jiffies - msecs_to_jiffies(PLT_DOUBLE_KEY_TIMEOUT);
drv_data->double_key_to = msecs_to_jiffies(PLT_DOUBLE_KEY_TIMEOUT);
drv_data->last_key_ts = jiffies - drv_data->double_key_to;
/* if HZ does not allow ms resolution - disable double key detection */
if (drv_data->double_key_to < PLT_DOUBLE_KEY_TIMEOUT)
drv_data->double_key_to = 0;
hid_set_drvdata(hdev, drv_data);
@ -210,29 +211,10 @@ static int plantronics_probe(struct hid_device *hdev,
if (ret)
hid_err(hdev, "hw start failed\n");
err:
return ret;
}
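The zero-check above works because msecs_to_jiffies() rounds up: with HZ below 1000, 5 ms converts to fewer than 5 jiffies, the driver cannot time two key events that precisely, and double-key detection is disabled. A minimal userspace sketch of that conversion (the rounding formula approximates the kernel helper; the HZ values are illustrative):

#include <stdio.h>

#define PLT_DOUBLE_KEY_TIMEOUT 5 /* ms, as in the driver */

/* Round-up ms->jiffies conversion, approximating the kernel's
 * msecs_to_jiffies() for illustration purposes.
 */
static unsigned long msecs_to_jiffies_approx(unsigned int ms, unsigned int hz)
{
	return ((unsigned long)ms * hz + 999) / 1000;
}

int main(void)
{
	unsigned int hz_table[] = { 100, 250, 300, 1000 };
	unsigned int i;

	for (i = 0; i < sizeof(hz_table) / sizeof(hz_table[0]); i++) {
		unsigned long to = msecs_to_jiffies_approx(PLT_DOUBLE_KEY_TIMEOUT,
							   hz_table[i]);
		/* The driver zeroes double_key_to when it is < 5 jiffies */
		printf("HZ=%-4u -> %lu jiffies -> detection %s\n",
		       hz_table[i], to,
		       to < PLT_DOUBLE_KEY_TIMEOUT ? "disabled" : "enabled");
	}
	return 0;
}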
static const struct hid_device_id plantronics_devices[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_PLANTRONICS,
USB_DEVICE_ID_PLANTRONICS_BLACKWIRE_3210_SERIES),
.driver_data = PLT_QUIRK_DOUBLE_VOLUME_KEYS },
{ HID_USB_DEVICE(USB_VENDOR_ID_PLANTRONICS,
USB_DEVICE_ID_PLANTRONICS_BLACKWIRE_3220_SERIES),
.driver_data = PLT_QUIRK_DOUBLE_VOLUME_KEYS },
{ HID_USB_DEVICE(USB_VENDOR_ID_PLANTRONICS,
USB_DEVICE_ID_PLANTRONICS_BLACKWIRE_3215_SERIES),
.driver_data = PLT_QUIRK_DOUBLE_VOLUME_KEYS },
{ HID_USB_DEVICE(USB_VENDOR_ID_PLANTRONICS,
USB_DEVICE_ID_PLANTRONICS_BLACKWIRE_3225_SERIES),
.driver_data = PLT_QUIRK_DOUBLE_VOLUME_KEYS },
{ HID_USB_DEVICE(USB_VENDOR_ID_PLANTRONICS,
USB_DEVICE_ID_PLANTRONICS_BLACKWIRE_3325_SERIES),
.driver_data = PLT_QUIRK_FOLLOWED_OPPOSITE_VOLUME_KEYS },
{ HID_USB_DEVICE(USB_VENDOR_ID_PLANTRONICS,
USB_DEVICE_ID_PLANTRONICS_ENCOREPRO_500_SERIES),
.driver_data = PLT_QUIRK_FOLLOWED_OPPOSITE_VOLUME_KEYS },
{ HID_USB_DEVICE(USB_VENDOR_ID_PLANTRONICS, HID_ANY_ID) },
{ }
};
@ -241,6 +223,14 @@ MODULE_DEVICE_TABLE(hid, plantronics_devices);
static const struct hid_usage_id plantronics_usages[] = {
{ HID_CP_VOLUMEUP, EV_KEY, HID_ANY_ID },
{ HID_CP_VOLUMEDOWN, EV_KEY, HID_ANY_ID },
{ HID_TELEPHONY_MUTE, EV_KEY, HID_ANY_ID },
{ HID_CONSUMER_MUTE, EV_KEY, HID_ANY_ID },
{ PLT2_VOL_UP, EV_KEY, HID_ANY_ID },
{ PLT2_VOL_DOWN, EV_KEY, HID_ANY_ID },
{ PLT2_MIC_MUTE, EV_KEY, HID_ANY_ID },
{ PLT1_VOL_UP, EV_KEY, HID_ANY_ID },
{ PLT1_VOL_DOWN, EV_KEY, HID_ANY_ID },
{ PLT1_MIC_MUTE, EV_KEY, HID_ANY_ID },
{ HID_TERMINATOR, HID_TERMINATOR, HID_TERMINATOR }
};

View File

@ -871,6 +871,7 @@ static const struct hid_device_id hid_ignore_list[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_DPAD) },
#endif
{ HID_USB_DEVICE(USB_VENDOR_ID_YEALINK, USB_DEVICE_ID_YEALINK_P1K_P4K_B2K) },
{ HID_USB_DEVICE(USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_QUANTA_HP_5MP_CAMERA_5473) },
{ }
};

View File

@ -577,14 +577,14 @@ static void fw_reset_work_fn(struct work_struct *unused)
static void _ish_sync_fw_clock(struct ishtp_device *dev)
{
static unsigned long prev_sync;
uint64_t usec;
struct ipc_time_update_msg time = {};
if (prev_sync && jiffies - prev_sync < 20 * HZ)
return;
prev_sync = jiffies;
usec = ktime_to_us(ktime_get_boottime());
ipc_send_mng_msg(dev, MNG_SYNC_FW_CLOCK, &usec, sizeof(uint64_t));
/* The fields of time are updated while the message is sent */
ipc_send_mng_msg(dev, MNG_SYNC_FW_CLOCK, &time, sizeof(time));
}
/**

View File

@ -2419,12 +2419,25 @@ void vmbus_free_mmio(resource_size_t start, resource_size_t size)
struct resource *iter;
mutex_lock(&hyperv_mmio_lock);
/*
* If all bytes of the MMIO range to be released are within the
* special case fb_mmio shadow region, skip releasing the shadow
* region since no corresponding __request_region() was done
* in vmbus_allocate_mmio().
*/
if (fb_mmio && start >= fb_mmio->start &&
(start + size - 1 <= fb_mmio->end))
goto skip_shadow_release;
for (iter = hyperv_mmio; iter; iter = iter->sibling) {
if ((iter->start >= start + size) || (iter->end <= start))
continue;
__release_region(iter, start, size);
}
skip_shadow_release:
release_mem_region(start, size);
mutex_unlock(&hyperv_mmio_lock);

View File

@ -420,8 +420,8 @@ static const s8 NCT6776_BEEP_BITS[] = {
static const u16 NCT6776_REG_TOLERANCE_H[] = {
0x10c, 0x20c, 0x30c, 0x80c, 0x90c, 0xa0c, 0xb0c };
static const u8 NCT6776_REG_PWM_MODE[] = { 0x04, 0, 0, 0, 0, 0 };
static const u8 NCT6776_PWM_MODE_MASK[] = { 0x01, 0, 0, 0, 0, 0 };
static const u8 NCT6776_REG_PWM_MODE[] = { 0x04, 0, 0, 0, 0, 0, 0 };
static const u8 NCT6776_PWM_MODE_MASK[] = { 0x01, 0, 0, 0, 0, 0, 0 };
static const u16 NCT6776_REG_FAN_MIN[] = {
0x63a, 0x63c, 0x63e, 0x640, 0x642, 0x64a, 0x64c };

View File

@ -267,7 +267,7 @@ catu_init_sg_table(struct device *catu_dev, int node,
* Each table can address up to 1MB and we can have
* CATU_PAGES_PER_SYSPAGE tables in a system page.
*/
nr_tpages = DIV_ROUND_UP(size, SZ_1M) / CATU_PAGES_PER_SYSPAGE;
nr_tpages = DIV_ROUND_UP(size, CATU_PAGES_PER_SYSPAGE * SZ_1M);
catu_table = tmc_alloc_sg_table(catu_dev, node, nr_tpages,
size >> PAGE_SHIFT, pages);
if (IS_ERR(catu_table))
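The fix matters because dividing an already rounded-up value loses the rounding: with 64K system pages, CATU_PAGES_PER_SYSPAGE would be 16, and a 1 MB buffer needs one table, yet DIV_ROUND_UP(SZ_1M, SZ_1M) / 16 truncates to 0. Folding the factor into the divisor rounds up over the combined granule. A standalone sketch of the arithmetic (the constants are illustrative stand-ins for the kernel macros):

#include <stdio.h>

#define SZ_1M (1024UL * 1024)
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned long pages_per_syspage = 16; /* e.g. 64K system pages / 4K CATU pages */
	unsigned long size = SZ_1M;           /* a 1 MB trace buffer */

	/* Before: round up first, then divide -- truncates back down to 0 */
	unsigned long before = DIV_ROUND_UP(size, SZ_1M) / pages_per_syspage;
	/* After: round up over the combined per-syspage granule */
	unsigned long after = DIV_ROUND_UP(size, pages_per_syspage * SZ_1M);

	printf("before=%lu after=%lu\n", before, after); /* before=0 after=1 */
	return 0;
}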

View File

@ -490,6 +490,8 @@ MODULE_DEVICE_TABLE(pci, ali1535_ids);
static int ali1535_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
int ret;
if (ali1535_setup(dev)) {
dev_warn(&dev->dev,
"ALI1535 not detected, module not inserted.\n");
@ -501,7 +503,15 @@ static int ali1535_probe(struct pci_dev *dev, const struct pci_device_id *id)
snprintf(ali1535_adapter.name, sizeof(ali1535_adapter.name),
"SMBus ALI1535 adapter at %04x", ali1535_offset);
return i2c_add_adapter(&ali1535_adapter);
ret = i2c_add_adapter(&ali1535_adapter);
if (ret)
goto release_region;
return 0;
release_region:
release_region(ali1535_smba, ALI1535_SMB_IOSIZE);
return ret;
}
static void ali1535_remove(struct pci_dev *dev)
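All three SMBus probe fixes in this release (ali1535, ali15x3, sis630) share one shape: the I/O region claimed by the setup helper must be released when i2c_add_adapter() fails, or it leaks for the life of the system. A condensed sketch of the pattern in kernel context, with hypothetical example_* names standing in for the per-driver symbols:

/* Sketch of the acquire/register/unwind pattern (names are illustrative). */
static int example_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
	int ret;

	if (example_setup(dev))		/* claims the I/O region on success */
		return -ENODEV;

	ret = i2c_add_adapter(&example_adapter);
	if (ret)
		goto release_region;	/* undo what example_setup() acquired */

	return 0;

release_region:
	release_region(example_smba, EXAMPLE_SMB_IOSIZE);
	return ret;
}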

View File

@ -473,6 +473,8 @@ MODULE_DEVICE_TABLE (pci, ali15x3_ids);
static int ali15x3_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
int ret;
if (ali15x3_setup(dev)) {
dev_err(&dev->dev,
"ALI15X3 not detected, module not inserted.\n");
@ -484,7 +486,15 @@ static int ali15x3_probe(struct pci_dev *dev, const struct pci_device_id *id)
snprintf(ali15x3_adapter.name, sizeof(ali15x3_adapter.name),
"SMBus ALI15X3 adapter at %04x", ali15x3_smba);
return i2c_add_adapter(&ali15x3_adapter);
ret = i2c_add_adapter(&ali15x3_adapter);
if (ret)
goto release_region;
return 0;
release_region:
release_region(ali15x3_smba, ALI15X3_SMB_IOSIZE);
return ret;
}
static void ali15x3_remove(struct pci_dev *dev)

View File

@ -1049,23 +1049,6 @@ static int omap_i2c_transmit_data(struct omap_i2c_dev *omap, u8 num_bytes,
return 0;
}
static irqreturn_t
omap_i2c_isr(int irq, void *dev_id)
{
struct omap_i2c_dev *omap = dev_id;
irqreturn_t ret = IRQ_HANDLED;
u16 mask;
u16 stat;
stat = omap_i2c_read_reg(omap, OMAP_I2C_STAT_REG);
mask = omap_i2c_read_reg(omap, OMAP_I2C_IE_REG) & ~OMAP_I2C_STAT_NACK;
if (stat & mask)
ret = IRQ_WAKE_THREAD;
return ret;
}
static int omap_i2c_xfer_data(struct omap_i2c_dev *omap)
{
u16 bits;
@ -1096,8 +1079,13 @@ static int omap_i2c_xfer_data(struct omap_i2c_dev *omap)
}
if (stat & OMAP_I2C_STAT_NACK) {
err |= OMAP_I2C_STAT_NACK;
omap->cmd_err |= OMAP_I2C_STAT_NACK;
omap_i2c_ack_stat(omap, OMAP_I2C_STAT_NACK);
if (!(stat & ~OMAP_I2C_STAT_NACK)) {
err = -EAGAIN;
break;
}
}
if (stat & OMAP_I2C_STAT_AL) {
@ -1475,7 +1463,7 @@ omap_i2c_probe(struct platform_device *pdev)
IRQF_NO_SUSPEND, pdev->name, omap);
else
r = devm_request_threaded_irq(&pdev->dev, omap->irq,
omap_i2c_isr, omap_i2c_isr_thread,
NULL, omap_i2c_isr_thread,
IRQF_NO_SUSPEND | IRQF_ONESHOT,
pdev->name, omap);

View File

@ -509,6 +509,8 @@ MODULE_DEVICE_TABLE(pci, sis630_ids);
static int sis630_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
int ret;
if (sis630_setup(dev)) {
dev_err(&dev->dev,
"SIS630 compatible bus not detected, "
@ -522,7 +524,15 @@ static int sis630_probe(struct pci_dev *dev, const struct pci_device_id *id)
snprintf(sis630_adapter.name, sizeof(sis630_adapter.name),
"SMBus SIS630 adapter at %04x", smbus_base + SMB_STS);
return i2c_add_adapter(&sis630_adapter);
ret = i2c_add_adapter(&sis630_adapter);
if (ret)
goto release_region;
return 0;
release_region:
release_region(smbus_base + SMB_STS, SIS630_SMB_IOREGION);
return ret;
}
static void sis630_remove(struct pci_dev *dev)

View File

@ -807,7 +807,7 @@ static int svc_i3c_update_ibirules(struct svc_i3c_master *master)
/* Create the IBIRULES register for both cases */
i3c_bus_for_each_i3cdev(&master->base.bus, dev) {
if (I3C_BCR_DEVICE_ROLE(dev->info.bcr) == I3C_BCR_I3C_MASTER)
if (!(dev->info.bcr & I3C_BCR_IBI_REQ_CAP))
continue;
if (dev->info.bcr & I3C_BCR_IBI_PAYLOAD) {

View File

@ -709,7 +709,7 @@ static int mma8452_write_raw(struct iio_dev *indio_dev,
int val, int val2, long mask)
{
struct mma8452_data *data = iio_priv(indio_dev);
int i, ret;
int i, j, ret;
ret = iio_device_claim_direct_mode(indio_dev);
if (ret)
@ -769,14 +769,18 @@ static int mma8452_write_raw(struct iio_dev *indio_dev,
break;
case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
ret = mma8452_get_odr_index(data);
j = mma8452_get_odr_index(data);
for (i = 0; i < ARRAY_SIZE(mma8452_os_ratio); i++) {
if (mma8452_os_ratio[i][ret] == val) {
if (mma8452_os_ratio[i][j] == val) {
ret = mma8452_set_power_mode(data, i);
break;
}
}
if (i == ARRAY_SIZE(mma8452_os_ratio)) {
ret = -EINVAL;
break;
}
break;
default:
ret = -EINVAL;
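The original code reused ret both as the ODR table index and as the return value, so when no oversampling ratio matched, the stale index escaped as a bogus success code; the fix introduces j for the index and reports -EINVAL explicitly. A condensed, illustrative sketch of the anti-pattern:

/* Condensed anti-pattern (illustrative): ret doubles as index and status. */
ret = get_odr_index();			/* a table column, not an error code */
for (i = 0; i < n; i++) {
	if (os_ratio[i][ret] == val) {
		ret = set_power_mode(i);	/* ret becomes a status here */
		break;
	}
}
return ret;	/* no match: the stale index escapes as a "success" value */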

View File

@ -144,7 +144,11 @@ struct ad7124_chip_info {
struct ad7124_channel_config {
bool live;
unsigned int cfg_slot;
/* Following fields are used to compare equality. */
/*
* The following fields are compared for equality. If you
* change any of them, you most likely also have to adapt
* ad7124_find_similar_live_cfg().
*/
struct_group(config_props,
enum ad7124_ref_sel refsel;
bool bipolar;
@ -331,15 +335,38 @@ static struct ad7124_channel_config *ad7124_find_similar_live_cfg(struct ad7124_
struct ad7124_channel_config *cfg)
{
struct ad7124_channel_config *cfg_aux;
ptrdiff_t cmp_size;
int i;
cmp_size = sizeof_field(struct ad7124_channel_config, config_props);
/*
* This is just to make sure that the comparison below is updated
* whenever struct ad7124_channel_config is changed.
*/
static_assert(sizeof_field(struct ad7124_channel_config, config_props) ==
sizeof(struct {
enum ad7124_ref_sel refsel;
bool bipolar;
bool buf_positive;
bool buf_negative;
unsigned int vref_mv;
unsigned int pga_bits;
unsigned int odr;
unsigned int odr_sel_bits;
unsigned int filter_type;
}));
for (i = 0; i < st->num_channels; i++) {
cfg_aux = &st->channels[i].cfg;
if (cfg_aux->live &&
!memcmp(&cfg->config_props, &cfg_aux->config_props, cmp_size))
cfg->refsel == cfg_aux->refsel &&
cfg->bipolar == cfg_aux->bipolar &&
cfg->buf_positive == cfg_aux->buf_positive &&
cfg->buf_negative == cfg_aux->buf_negative &&
cfg->vref_mv == cfg_aux->vref_mv &&
cfg->pga_bits == cfg_aux->pga_bits &&
cfg->odr == cfg_aux->odr &&
cfg->odr_sel_bits == cfg_aux->odr_sel_bits &&
cfg->filter_type == cfg_aux->filter_type)
return cfg_aux;
}
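The static_assert() above pins the layout of the config_props group: if a field is added to struct ad7124_channel_config without mirroring it in the anonymous struct (and in the field-by-field comparison), the build fails instead of silently comparing stale state. A minimal standalone illustration of the same trick:

#include <assert.h>	/* static_assert (C11) */

struct point {
	struct {		/* group whose layout we want to pin */
		int x;
		int y;
	} props;
};

/* Fails to compile if someone adds a field to props without updating this. */
static_assert(sizeof(((struct point *)0)->props) ==
	      sizeof(struct { int x; int y; }),
	      "update the field-by-field comparison too");

int main(void) { return 0; }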

View File

@ -542,6 +542,8 @@ static struct class ib_class = {
static void rdma_init_coredev(struct ib_core_device *coredev,
struct ib_device *dev, struct net *net)
{
bool is_full_dev = &dev->coredev == coredev;
/* This BUILD_BUG_ON is intended to catch layout change
* of union of ib_core_device and device.
* dev must be the first element as ib_core and providers
@ -553,6 +555,13 @@ static void rdma_init_coredev(struct ib_core_device *coredev,
coredev->dev.class = &ib_class;
coredev->dev.groups = dev->groups;
/*
* Don't expose hw counters outside of the init namespace.
*/
if (!is_full_dev && dev->hw_stats_attr_index)
coredev->dev.groups[dev->hw_stats_attr_index] = NULL;
device_initialize(&coredev->dev);
coredev->owner = dev;
INIT_LIST_HEAD(&coredev->port_list);

View File

@ -2671,11 +2671,11 @@ static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
struct ib_mad_private *mad)
{
unsigned long flags;
int post, ret;
struct ib_mad_private *mad_priv;
struct ib_sge sg_list;
struct ib_recv_wr recv_wr;
struct ib_mad_queue *recv_queue = &qp_info->recv_queue;
int ret = 0;
/* Initialize common scatter list fields */
sg_list.lkey = qp_info->port_priv->pd->local_dma_lkey;
@ -2685,7 +2685,7 @@ static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
recv_wr.sg_list = &sg_list;
recv_wr.num_sge = 1;
do {
while (true) {
/* Allocate and map receive buffer */
if (mad) {
mad_priv = mad;
@ -2693,10 +2693,8 @@ static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
} else {
mad_priv = alloc_mad_private(port_mad_size(qp_info->port_priv),
GFP_ATOMIC);
if (!mad_priv) {
ret = -ENOMEM;
break;
}
if (!mad_priv)
return -ENOMEM;
}
sg_list.length = mad_priv_dma_size(mad_priv);
sg_list.addr = ib_dma_map_single(qp_info->port_priv->device,
@ -2705,37 +2703,41 @@ static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
DMA_FROM_DEVICE);
if (unlikely(ib_dma_mapping_error(qp_info->port_priv->device,
sg_list.addr))) {
kfree(mad_priv);
ret = -ENOMEM;
break;
goto free_mad_priv;
}
mad_priv->header.mapping = sg_list.addr;
mad_priv->header.mad_list.mad_queue = recv_queue;
mad_priv->header.mad_list.cqe.done = ib_mad_recv_done;
recv_wr.wr_cqe = &mad_priv->header.mad_list.cqe;
/* Post receive WR */
spin_lock_irqsave(&recv_queue->lock, flags);
post = (++recv_queue->count < recv_queue->max_active);
list_add_tail(&mad_priv->header.mad_list.list, &recv_queue->list);
if (recv_queue->count >= recv_queue->max_active) {
/* Fully populated the receive queue */
spin_unlock_irqrestore(&recv_queue->lock, flags);
break;
}
recv_queue->count++;
list_add_tail(&mad_priv->header.mad_list.list,
&recv_queue->list);
spin_unlock_irqrestore(&recv_queue->lock, flags);
ret = ib_post_recv(qp_info->qp, &recv_wr, NULL);
if (ret) {
spin_lock_irqsave(&recv_queue->lock, flags);
list_del(&mad_priv->header.mad_list.list);
recv_queue->count--;
spin_unlock_irqrestore(&recv_queue->lock, flags);
ib_dma_unmap_single(qp_info->port_priv->device,
mad_priv->header.mapping,
mad_priv_dma_size(mad_priv),
DMA_FROM_DEVICE);
kfree(mad_priv);
dev_err(&qp_info->port_priv->device->dev,
"ib_post_recv failed: %d\n", ret);
break;
}
} while (post);
}
ib_dma_unmap_single(qp_info->port_priv->device,
mad_priv->header.mapping,
mad_priv_dma_size(mad_priv), DMA_FROM_DEVICE);
free_mad_priv:
kfree(mad_priv);
return ret;
}

View File

@ -976,6 +976,7 @@ int ib_setup_device_attrs(struct ib_device *ibdev)
for (i = 0; i != ARRAY_SIZE(ibdev->groups); i++)
if (!ibdev->groups[i]) {
ibdev->groups[i] = &data->group;
ibdev->hw_stats_attr_index = i;
return 0;
}
WARN(true, "struct ib_device->groups is too small");

View File

@ -1181,8 +1181,6 @@ static void __modify_flags_from_init_state(struct bnxt_qplib_qp *qp)
qp->path_mtu =
CMDQ_MODIFY_QP_PATH_MTU_MTU_2048;
}
qp->modify_flags &=
~CMDQ_MODIFY_QP_MODIFY_MASK_VLAN_ID;
/* Bono FW require the max_dest_rd_atomic to be >= 1 */
if (qp->max_dest_rd_atomic < 1)
qp->max_dest_rd_atomic = 1;

View File

@ -220,9 +220,10 @@ int bnxt_qplib_deinit_rcfw(struct bnxt_qplib_rcfw *rcfw);
int bnxt_qplib_init_rcfw(struct bnxt_qplib_rcfw *rcfw,
struct bnxt_qplib_ctx *ctx, int is_virtfn);
void bnxt_qplib_mark_qp_error(void *qp_handle);
static inline u32 map_qp_id_to_tbl_indx(u32 qid, struct bnxt_qplib_rcfw *rcfw)
{
/* Last index of the qp_tbl is for QP1, i.e. qp_tbl_size - 1 */
return (qid == 1) ? rcfw->qp_tbl_size - 1 : qid % rcfw->qp_tbl_size - 2;
return (qid == 1) ? rcfw->qp_tbl_size - 1 : (qid % (rcfw->qp_tbl_size - 2));
}
#endif /* __BNXT_QPLIB_RCFW_H__ */
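The extra parentheses matter because % binds tighter than binary -, so the old expression computed (qid % qp_tbl_size) - 2, which wraps around for qid 0 and maps other IDs to the wrong slot. A standalone check with illustrative values:

#include <stdio.h>

int main(void)
{
	unsigned int qp_tbl_size = 128, qid = 130;

	/* '%' binds tighter than '-', so the old code computed this: */
	unsigned int before = qid % qp_tbl_size - 2;	/* (130 % 128) - 2 = 0 */
	/* The fix forces the intended grouping: */
	unsigned int after = qid % (qp_tbl_size - 2);	/* 130 % 126       = 4 */

	printf("before=%u after=%u\n", before, after);
	return 0;
}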

View File

@ -1410,6 +1410,11 @@ static int hem_list_alloc_root_bt(struct hns_roce_dev *hr_dev,
return ret;
}
/* This is the number of bottom-level BT pages of a 100G MR on a 4K-page OS,
* assuming the BT page size is not expanded by cal_best_bt_pg_sz()
*/
#define RESCHED_LOOP_CNT_THRESHOLD_ON_4K 12800
/* construct the base address table and link them by address hop config */
int hns_roce_hem_list_request(struct hns_roce_dev *hr_dev,
struct hns_roce_hem_list *hem_list,
@ -1418,6 +1423,7 @@ int hns_roce_hem_list_request(struct hns_roce_dev *hr_dev,
{
const struct hns_roce_buf_region *r;
int ofs, end;
int loop;
int unit;
int ret;
int i;
@ -1435,7 +1441,10 @@ int hns_roce_hem_list_request(struct hns_roce_dev *hr_dev,
continue;
end = r->offset + r->count;
for (ofs = r->offset; ofs < end; ofs += unit) {
for (ofs = r->offset, loop = 1; ofs < end; ofs += unit, loop++) {
if (!(loop % RESCHED_LOOP_CNT_THRESHOLD_ON_4K))
cond_resched();
ret = hem_list_alloc_mid_bt(hr_dev, r, unit, ofs,
hem_list->mid_bt[i],
&hem_list->btm_bt);
@ -1487,19 +1496,22 @@ void hns_roce_hem_list_init(struct hns_roce_hem_list *hem_list)
void *hns_roce_hem_list_find_mtt(struct hns_roce_dev *hr_dev,
struct hns_roce_hem_list *hem_list,
int offset, int *mtt_cnt, u64 *phy_addr)
int offset, int *mtt_cnt)
{
struct list_head *head = &hem_list->btm_bt;
struct hns_roce_hem_item *hem, *temp_hem;
void *cpu_base = NULL;
u64 phy_base = 0;
int loop = 1;
int nr = 0;
list_for_each_entry_safe(hem, temp_hem, head, sibling) {
if (!(loop % RESCHED_LOOP_CNT_THRESHOLD_ON_4K))
cond_resched();
loop++;
if (hem_list_page_is_in_range(hem, offset)) {
nr = offset - hem->start;
cpu_base = hem->addr + nr * BA_BYTE_LEN;
phy_base = hem->dma_addr + nr * BA_BYTE_LEN;
nr = hem->end + 1 - offset;
break;
}
@ -1508,8 +1520,5 @@ void *hns_roce_hem_list_find_mtt(struct hns_roce_dev *hr_dev,
if (mtt_cnt)
*mtt_cnt = nr;
if (phy_addr)
*phy_addr = phy_base;
return cpu_base;
}
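Both loops above yield every RESCHED_LOOP_CNT_THRESHOLD_ON_4K iterations so that registering a huge MR cannot monopolize a CPU on non-preemptible kernels. The shape of the pattern, reduced to its essentials (kernel-context sketch; names are illustrative):

/* Sketch: voluntarily reschedule every N iterations of a long setup loop. */
#define RESCHED_EVERY 12800

static void walk_items(struct item *items, int count)
{
	int i, loop;

	for (i = 0, loop = 1; i < count; i++, loop++) {
		if (!(loop % RESCHED_EVERY))
			cond_resched();	/* voluntary reschedule point */

		process_item(&items[i]);	/* illustrative per-item work */
	}
}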

View File

@ -132,7 +132,7 @@ void hns_roce_hem_list_release(struct hns_roce_dev *hr_dev,
struct hns_roce_hem_list *hem_list);
void *hns_roce_hem_list_find_mtt(struct hns_roce_dev *hr_dev,
struct hns_roce_hem_list *hem_list,
int offset, int *mtt_cnt, u64 *phy_addr);
int offset, int *mtt_cnt);
static inline void hns_roce_hem_first(struct hns_roce_hem *hem,
struct hns_roce_hem_iter *iter)

View File

@ -185,7 +185,7 @@ static int hns_roce_query_device(struct ib_device *ib_dev,
IB_DEVICE_RC_RNR_NAK_GEN;
props->max_send_sge = hr_dev->caps.max_sq_sg;
props->max_recv_sge = hr_dev->caps.max_rq_sg;
props->max_sge_rd = 1;
props->max_sge_rd = hr_dev->caps.max_sq_sg;
props->max_cq = hr_dev->caps.num_cqs;
props->max_cqe = hr_dev->caps.max_cqes;
props->max_mr = hr_dev->caps.num_mtpts;

View File

@ -614,7 +614,7 @@ static int mtr_map_region(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
while (offset < end && npage < max_count) {
count = 0;
mtts = hns_roce_hem_list_find_mtt(hr_dev, &mtr->hem_list,
offset, &count, NULL);
offset, &count);
if (!mtts)
return -ENOBUFS;
@ -864,7 +864,7 @@ int hns_roce_mtr_find(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
mtt_count = 0;
mtts = hns_roce_hem_list_find_mtt(hr_dev, &mtr->hem_list,
start_index + total,
&mtt_count, NULL);
&mtt_count);
if (!mtts || !mtt_count)
goto done;

View File

@ -842,12 +842,14 @@ static int alloc_user_qp_db(struct hns_roce_dev *hr_dev,
struct hns_roce_ib_create_qp *ucmd,
struct hns_roce_ib_create_qp_resp *resp)
{
bool has_sdb = user_qp_has_sdb(hr_dev, init_attr, udata, resp, ucmd);
struct hns_roce_ucontext *uctx = rdma_udata_to_drv_context(udata,
struct hns_roce_ucontext, ibucontext);
bool has_rdb = user_qp_has_rdb(hr_dev, init_attr, udata, resp);
struct ib_device *ibdev = &hr_dev->ib_dev;
int ret;
if (user_qp_has_sdb(hr_dev, init_attr, udata, resp, ucmd)) {
if (has_sdb) {
ret = hns_roce_db_map_user(uctx, ucmd->sdb_addr, &hr_qp->sdb);
if (ret) {
ibdev_err(ibdev,
@ -858,7 +860,7 @@ static int alloc_user_qp_db(struct hns_roce_dev *hr_dev,
hr_qp->en_flags |= HNS_ROCE_QP_CAP_SQ_RECORD_DB;
}
if (user_qp_has_rdb(hr_dev, init_attr, udata, resp)) {
if (has_rdb) {
ret = hns_roce_db_map_user(uctx, ucmd->db_addr, &hr_qp->rdb);
if (ret) {
ibdev_err(ibdev,
@ -872,7 +874,7 @@ static int alloc_user_qp_db(struct hns_roce_dev *hr_dev,
return 0;
err_sdb:
if (hr_qp->en_flags & HNS_ROCE_QP_CAP_SQ_RECORD_DB)
if (has_sdb)
hns_roce_db_unmap_user(uctx, &hr_qp->sdb);
err_out:
return ret;
@ -1115,7 +1117,7 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
min(udata->outlen, sizeof(resp)));
if (ret) {
ibdev_err(ibdev, "copy qp resp failed!\n");
goto err_store;
goto err_flow_ctrl;
}
}

View File

@ -481,7 +481,7 @@ repoll:
}
qpn = ntohl(cqe64->sop_drop_qpn) & 0xffffff;
if (!*cur_qp || (qpn != (*cur_qp)->ibqp.qp_num)) {
if (!*cur_qp || (qpn != (*cur_qp)->trans_qp.base.mqp.qpn)) {
/* We do not have to take the QP table lock here,
* because CQs will be locked while QPs are removed
* from the table.

View File

@ -2701,8 +2701,11 @@ static void dib8000_set_dds(struct dib8000_state *state, s32 offset_khz)
u8 ratio;
if (state->revision == 0x8090) {
u32 internal = dib8000_read32(state, 23) / 1000;
ratio = 4;
unit_khz_dds_val = (1<<26) / (dib8000_read32(state, 23) / 1000);
unit_khz_dds_val = (1<<26) / (internal ?: 1);
if (offset_khz < 0)
dds = (1 << 26) - (abs_offset_khz * unit_khz_dds_val);
else
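If dib8000_read32(state, 23) returns less than 1000, the integer division makes internal zero, and the original expression then divided by it. The GNU x ?: y extension (the "Elvis" operator) yields x when x is non-zero, else y, evaluating x only once, which is why the fix reads the register into internal first. A standalone illustration (compile as GNU C):

#include <stdio.h>

int main(void)
{
	unsigned int internal = 600 / 1000;	/* integer division -> 0 */

	/* GNU "Elvis" operator: x ?: y is x when x is non-zero, else y,
	 * with x evaluated only once. Guards the division below.
	 */
	unsigned int unit = (1U << 26) / (internal ?: 1);

	printf("unit=%u\n", unit);	/* divides by 1 instead of faulting */
	return 0;
}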

View File

@ -1460,7 +1460,7 @@ err_mutex:
return ret;
}
static int __exit et8ek8_remove(struct i2c_client *client)
static int et8ek8_remove(struct i2c_client *client)
{
struct v4l2_subdev *subdev = i2c_get_clientdata(client);
struct et8ek8_sensor *sensor = to_et8ek8_sensor(subdev);
@ -1504,7 +1504,7 @@ static struct i2c_driver et8ek8_i2c_driver = {
.of_match_table = et8ek8_of_table,
},
.probe_new = et8ek8_probe,
.remove = __exit_p(et8ek8_remove),
.remove = et8ek8_remove,
.id_table = et8ek8_id_table,
};

View File

@ -3740,6 +3740,7 @@ static int allegro_probe(struct platform_device *pdev)
if (ret < 0) {
v4l2_err(&dev->v4l2_dev,
"failed to request firmware: %d\n", ret);
v4l2_device_unregister(&dev->v4l2_dev);
return ret;
}

View File

@ -813,6 +813,7 @@ static int rtsx_usb_ms_drv_remove(struct platform_device *pdev)
host->eject = true;
cancel_work_sync(&host->handle_req);
cancel_delayed_work_sync(&host->poll_card);
mutex_lock(&host->host_mutex);
if (host->req) {

View File

@ -920,7 +920,7 @@ static void sm501_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
{
struct sm501_gpio_chip *smchip = gpiochip_get_data(chip);
struct sm501_gpio *smgpio = smchip->ourgpio;
unsigned long bit = 1 << offset;
unsigned long bit = BIT(offset);
void __iomem *regs = smchip->regbase;
unsigned long save;
unsigned long val;
@ -946,7 +946,7 @@ static int sm501_gpio_input(struct gpio_chip *chip, unsigned offset)
struct sm501_gpio_chip *smchip = gpiochip_get_data(chip);
struct sm501_gpio *smgpio = smchip->ourgpio;
void __iomem *regs = smchip->regbase;
unsigned long bit = 1 << offset;
unsigned long bit = BIT(offset);
unsigned long save;
unsigned long ddr;
@ -971,7 +971,7 @@ static int sm501_gpio_output(struct gpio_chip *chip,
{
struct sm501_gpio_chip *smchip = gpiochip_get_data(chip);
struct sm501_gpio *smgpio = smchip->ourgpio;
unsigned long bit = 1 << offset;
unsigned long bit = BIT(offset);
void __iomem *regs = smchip->regbase;
unsigned long save;
unsigned long val;
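Replacing 1 << offset with BIT(offset) changes the type of the shift: BIT(n) is (1UL << (n)), so the result is an unsigned long, whereas shifting the signed int literal 1 by 31 overflows (undefined behaviour) and then typically sign-extends when stored into the unsigned long bit. A standalone illustration (the "risky" value shown is typical, not guaranteed):

#include <stdio.h>

/* Modelled on the kernel's BIT() macro */
#define BIT(nr) (1UL << (nr))

int main(void)
{
	unsigned int offset = 31;

	/* (1 << 31) overflows signed int: undefined behaviour in C. */
	unsigned long risky = 1 << offset;	/* typically sign-extends */
	unsigned long safe = BIT(offset);	/* well-defined: 0x80000000 */

	printf("risky=%#lx safe=%#lx\n", risky, safe);
	return 0;
}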

View File

@ -2507,8 +2507,10 @@ static int atmci_probe(struct platform_device *pdev)
/* Get MCI capabilities and set operations according to it */
atmci_get_cap(host);
ret = atmci_configure_dma(host);
if (ret == -EPROBE_DEFER)
if (ret == -EPROBE_DEFER) {
clk_disable_unprepare(host->mck);
goto err_dma_probe_defer;
}
if (ret == 0) {
host->prepare_data = &atmci_prepare_data_dma;
host->submit_data = &atmci_submit_data_dma;

View File

@ -32,6 +32,8 @@
struct sdhci_brcmstb_priv {
void __iomem *cfg_regs;
unsigned int flags;
struct clk *base_clk;
u32 base_freq_hz;
};
struct brcmstb_match_priv {
@ -251,9 +253,11 @@ static int sdhci_brcmstb_probe(struct platform_device *pdev)
struct sdhci_pltfm_host *pltfm_host;
const struct of_device_id *match;
struct sdhci_brcmstb_priv *priv;
u32 actual_clock_mhz;
struct sdhci_host *host;
struct resource *iomem;
struct clk *clk;
struct clk *base_clk = NULL;
int res;
match = of_match_node(sdhci_brcm_of_match, pdev->dev.of_node);
@ -331,6 +335,35 @@ static int sdhci_brcmstb_probe(struct platform_device *pdev)
if (match_priv->flags & BRCMSTB_MATCH_FLAGS_BROKEN_TIMEOUT)
host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL;
/* Change the base clock frequency if the DT property exists */
if (device_property_read_u32(&pdev->dev, "clock-frequency",
&priv->base_freq_hz) != 0)
goto add_host;
base_clk = devm_clk_get_optional(&pdev->dev, "sdio_freq");
if (IS_ERR(base_clk)) {
dev_warn(&pdev->dev, "Clock for \"sdio_freq\" not found\n");
goto add_host;
}
res = clk_prepare_enable(base_clk);
if (res)
goto err;
/* set improved clock rate */
clk_set_rate(base_clk, priv->base_freq_hz);
actual_clock_mhz = clk_get_rate(base_clk) / 1000000;
host->caps &= ~SDHCI_CLOCK_V3_BASE_MASK;
host->caps |= (actual_clock_mhz << SDHCI_CLOCK_BASE_SHIFT);
/* Disable presets because they are now incorrect */
host->quirks2 |= SDHCI_QUIRK2_PRESET_VALUE_BROKEN;
dev_dbg(&pdev->dev, "Base Clock Frequency changed to %dMHz\n",
actual_clock_mhz);
priv->base_clk = base_clk;
add_host:
res = sdhci_brcmstb_add_host(host, priv);
if (res)
goto err;
@ -341,6 +374,7 @@ static int sdhci_brcmstb_probe(struct platform_device *pdev)
err:
sdhci_pltfm_free(pdev);
err_clk:
clk_disable_unprepare(base_clk);
clk_disable_unprepare(clk);
return res;
}
@ -352,11 +386,61 @@ static void sdhci_brcmstb_shutdown(struct platform_device *pdev)
MODULE_DEVICE_TABLE(of, sdhci_brcm_of_match);
#ifdef CONFIG_PM_SLEEP
static int sdhci_brcmstb_suspend(struct device *dev)
{
struct sdhci_host *host = dev_get_drvdata(dev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_brcmstb_priv *priv = sdhci_pltfm_priv(pltfm_host);
int ret;
clk_disable_unprepare(priv->base_clk);
if (host->mmc->caps2 & MMC_CAP2_CQE) {
ret = cqhci_suspend(host->mmc);
if (ret)
return ret;
}
return sdhci_pltfm_suspend(dev);
}
static int sdhci_brcmstb_resume(struct device *dev)
{
struct sdhci_host *host = dev_get_drvdata(dev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_brcmstb_priv *priv = sdhci_pltfm_priv(pltfm_host);
int ret;
ret = sdhci_pltfm_resume(dev);
if (!ret && priv->base_freq_hz) {
ret = clk_prepare_enable(priv->base_clk);
/*
* Note: using clk_get_rate() below as clk_get_rate()
* honors CLK_GET_RATE_NOCACHE attribute, but clk_set_rate()
* may do implicit get_rate() calls that do not honor
* CLK_GET_RATE_NOCACHE.
*/
if (!ret &&
(clk_get_rate(priv->base_clk) != priv->base_freq_hz))
ret = clk_set_rate(priv->base_clk, priv->base_freq_hz);
}
if (host->mmc->caps2 & MMC_CAP2_CQE)
ret = cqhci_resume(host->mmc);
return ret;
}
#endif
static const struct dev_pm_ops sdhci_brcmstb_pmops = {
SET_SYSTEM_SLEEP_PM_OPS(sdhci_brcmstb_suspend, sdhci_brcmstb_resume)
};
static struct platform_driver sdhci_brcmstb_driver = {
.driver = {
.name = "sdhci-brcmstb",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
.pm = &sdhci_pltfm_pmops,
.pm = &sdhci_brcmstb_pmops,
.of_match_table = of_match_ptr(sdhci_brcm_of_match),
},
.probe = sdhci_brcmstb_probe,

View File

@ -401,6 +401,7 @@ static int sdhci_pxav3_probe(struct platform_device *pdev)
if (!IS_ERR(pxa->clk_core))
clk_prepare_enable(pxa->clk_core);
host->mmc->caps |= MMC_CAP_NEED_RSP_BUSY;
/* enable 1/8V DDR capable */
host->mmc->caps |= MMC_CAP_1_8V_DDR;

Some files were not shown because too many files have changed in this diff