commit 754af62d6b
Author: Bruce Ashfield
Date:   2020-06-05 14:46:24 -04:00

    Merge tag 'v5.2.43' into v5.2/standard/base

    This is the 5.2.43 stable release

378 changed files with 3641 additions and 1702 deletions


@@ -4956,8 +4956,7 @@
 	usbcore.old_scheme_first=
 			[USB] Start with the old device initialization
-			scheme, applies only to low and full-speed devices
-			(default 0 = off).
+			scheme (default 0 = off).

 	usbcore.usbfs_memory_mb=
 			[USB] Memory limit (in MB) for buffers allocated by


@@ -63,6 +63,7 @@ stable kernels.
 | ARM            | Cortex-A76      | #1286807        | ARM64_ERRATUM_1286807       |
 | ARM            | Cortex-A76      | #1463225        | ARM64_ERRATUM_1463225       |
 | ARM            | Neoverse-N1     | #1188873,1418040| ARM64_ERRATUM_1418040       |
+| ARM            | Neoverse-N1     | #1542419        | ARM64_ERRATUM_1542419       |
 | ARM            | MMU-500         | #841119,826419  | N/A                         |
 |                |                 |                 |                             |
 | Broadcom       | Brahma-B53      | N/A             | ARM64_ERRATUM_845719        |


@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 5
 PATCHLEVEL = 2
-SUBLEVEL = 42
+SUBLEVEL = 43
 EXTRAVERSION =
 NAME = Bobtail Squid
@@ -880,6 +880,12 @@ KBUILD_CFLAGS += $(call cc-option,-Werror=designated-init)
 # change __FILE__ to the relative path from the srctree
 KBUILD_CFLAGS	+= $(call cc-option,-fmacro-prefix-map=$(srctree)/=)
 
+# ensure -fcf-protection is disabled when using retpoline as it is
+# incompatible with -mindirect-branch=thunk-extern
+ifdef CONFIG_RETPOLINE
+KBUILD_CFLAGS += $(call cc-option,-fcf-protection=none)
+endif
+
 # use the deterministic mode of AR if available
 KBUILD_ARFLAGS := $(call ar-option,D)


@@ -486,6 +486,7 @@
 				     "dsi0_ddr2",
 				     "dsi0_ddr";
+			status = "disabled";
 		};
 
 		thermal: thermal@7e212000 {


@@ -153,6 +153,7 @@
 	bus-width = <4>;
 	keep-power-in-suspend;
 	mmc-pwrseq = <&pwrseq_ti_wifi>;
+	cap-power-off-card;
 	non-removable;
 	vmmc-supply = <&vcc_3v3>;
 	/* vqmmc-supply = <&nvcc_sd1>; - MMC layer doesn't like it! */


@@ -30,7 +30,7 @@ static int nhpoly1305_neon_update(struct shash_desc *desc,
 		return crypto_nhpoly1305_update(desc, src, srclen);
 
 	do {
-		unsigned int n = min_t(unsigned int, srclen, PAGE_SIZE);
+		unsigned int n = min_t(unsigned int, srclen, SZ_4K);
 
 		kernel_neon_begin();
 		crypto_nhpoly1305_update_helper(desc, src, n, _nh_neon);
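An aside on this SZ_4K change (the same fix recurs in the arm64 and x86 glue code below): PAGE_SIZE can be 64 KiB on some arm64 configurations, so bounding each kernel_neon_begin()/kernel_neon_end() section by PAGE_SIZE could keep preemption disabled far longer than intended, whereas a fixed 4 KiB unit caps that latency everywhere. A minimal user-space sketch of the same chunking pattern; the names here are illustrative, not kernel API:

    #include <stddef.h>

    #define CHUNK 4096  /* fixed unit, independent of the platform page size */

    /* Process 'len' bytes in bounded chunks so that whatever lock or
     * preemption-off section process_chunk() needs stays short. */
    static void process_all(const unsigned char *src, size_t len,
                            void (*process_chunk)(const unsigned char *, size_t))
    {
            do {
                    size_t n = len < CHUNK ? len : CHUNK;

                    process_chunk(src, n);  /* e.g. inside begin()/end() markers */
                    src += n;
                    len -= n;
            } while (len);
    }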


@@ -539,6 +539,22 @@ config ARM64_ERRATUM_1463225
 
 	  If unsure, say Y.
 
+config ARM64_ERRATUM_1542419
+	bool "Neoverse-N1: workaround mis-ordering of instruction fetches"
+	default y
+	help
+	  This option adds a workaround for ARM Neoverse-N1 erratum
+	  1542419.
+
+	  Affected Neoverse-N1 cores could execute a stale instruction when
+	  modified by another CPU. The workaround depends on a firmware
+	  counterpart.
+
+	  Workaround the issue by hiding the DIC feature from EL0. This
+	  forces user-space to perform cache maintenance.
+
+	  If unsure, say Y.
+
 config CAVIUM_ERRATUM_22375
 	bool "Cavium erratum 22375, 24313"
 	default y
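Since the workaround hides CTR_EL0.DIC, user-space JITs on affected systems must go back to issuing explicit instruction-cache maintenance after writing code. A hedged sketch of what that looks like from the user side, using the standard GCC/Clang builtin; the buffer and function names are hypothetical:

    #include <stddef.h>

    /* With DIC hidden by the kernel, newly written instructions must be
     * synchronised to the instruction stream before being executed. */
    static void publish_code(void *buf, size_t len)
    {
            /* Expands to the DC CVAU / IC IVAU loop sized by the (now
             * trapped and sanitised) CTR_EL0 line-size fields. */
            __builtin___clear_cache((char *)buf, (char *)buf + len);
    }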


@@ -30,7 +30,7 @@ static int nhpoly1305_neon_update(struct shash_desc *desc,
 		return crypto_nhpoly1305_update(desc, src, srclen);
 
 	do {
-		unsigned int n = min_t(unsigned int, srclen, PAGE_SIZE);
+		unsigned int n = min_t(unsigned int, srclen, SZ_4K);
 
 		kernel_neon_begin();
 		crypto_nhpoly1305_update_helper(desc, src, n, _nh_neon);


@@ -11,6 +11,7 @@
 #define CTR_L1IP_MASK		3
 #define CTR_DMINLINE_SHIFT	16
 #define CTR_IMINLINE_SHIFT	0
+#define CTR_IMINLINE_MASK	0xf
 #define CTR_ERG_SHIFT		20
 #define CTR_CWG_SHIFT		24
 #define CTR_CWG_MASK		15
@@ -18,7 +19,7 @@
 #define CTR_DIC_SHIFT		29
 
 #define CTR_CACHE_MINLINE_MASK	\
-	(0xf << CTR_DMINLINE_SHIFT | 0xf << CTR_IMINLINE_SHIFT)
+	(0xf << CTR_DMINLINE_SHIFT | CTR_IMINLINE_MASK << CTR_IMINLINE_SHIFT)
 
 #define CTR_L1IP(ctr)		(((ctr) >> CTR_L1IP_SHIFT) & CTR_L1IP_MASK)


@@ -5,6 +5,9 @@
 #ifndef __ASM_COMPAT_H
 #define __ASM_COMPAT_H
 #ifdef __KERNEL__
+
+#include <asm-generic/compat.h>
+
 #ifdef CONFIG_COMPAT
 
 /*
@@ -14,8 +17,6 @@
 #include <linux/sched.h>
 #include <linux/sched/task_stack.h>
 
-#include <asm-generic/compat.h>
-
 #define COMPAT_USER_HZ		100
 #ifdef __AARCH64EB__
 #define COMPAT_UTS_MACHINE	"armv8b\0\0"


@@ -52,7 +52,8 @@
 #define ARM64_HAS_IRQ_PRIO_MASKING		42
 #define ARM64_HAS_DCPODP			43
 #define ARM64_WORKAROUND_1463225		44
+#define ARM64_WORKAROUND_1542419		45
 
-#define ARM64_NCAPS				45
+#define ARM64_NCAPS				46
 
 #endif /* __ASM_CPUCAPS_H */


@@ -49,7 +49,9 @@
 #ifndef CONFIG_BROKEN_GAS_INST
 
 #ifdef __ASSEMBLY__
-#define __emit_inst(x)			.inst (x)
+// The space separator is omitted so that __emit_inst(x) can be parsed as
+// either an assembler directive or an assembler macro argument.
+#define __emit_inst(x)			.inst(x)
 #else
 #define __emit_inst(x)			".inst " __stringify((x)) "\n\t"
 #endif


@@ -87,13 +87,21 @@ has_mismatched_cache_type(const struct arm64_cpu_capabilities *entry,
 }
 
 static void
-cpu_enable_trap_ctr_access(const struct arm64_cpu_capabilities *__unused)
+cpu_enable_trap_ctr_access(const struct arm64_cpu_capabilities *cap)
 {
 	u64 mask = arm64_ftr_reg_ctrel0.strict_mask;
+	bool enable_uct_trap = false;
 
 	/* Trap CTR_EL0 access on this CPU, only if it has a mismatch */
 	if ((read_cpuid_cachetype() & mask) !=
 	    (arm64_ftr_reg_ctrel0.sys_val & mask))
+		enable_uct_trap = true;
+
+	/* ... or if the system is affected by an erratum */
+	if (cap->capability == ARM64_WORKAROUND_1542419)
+		enable_uct_trap = true;
+
+	if (enable_uct_trap)
 		sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCT, 0);
 }
 
@@ -615,6 +623,18 @@ check_branch_predictor(const struct arm64_cpu_capabilities *entry, int scope)
 	return (need_wa > 0);
 }
 
+static bool __maybe_unused
+has_neoverse_n1_erratum_1542419(const struct arm64_cpu_capabilities *entry,
+				int scope)
+{
+	u32 midr = read_cpuid_id();
+	bool has_dic = read_cpuid_cachetype() & BIT(CTR_DIC_SHIFT);
+	const struct midr_range range = MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1);
+
+	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
+	return is_midr_in_range(midr, &range) && has_dic;
+}
+
 #ifdef CONFIG_HARDEN_EL2_VECTORS
 
 static const struct midr_range arm64_harden_el2_vectors[] = {
@@ -878,6 +898,16 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
 		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
 		.matches = has_cortex_a76_erratum_1463225,
 	},
+#endif
+#ifdef CONFIG_ARM64_ERRATUM_1542419
+	{
+		/* we depend on the firmware portion for correctness */
+		.desc = "ARM erratum 1542419 (kernel portion)",
+		.capability = ARM64_WORKAROUND_1542419,
+		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
+		.matches = has_neoverse_n1_erratum_1542419,
+		.cpu_enable = cpu_enable_trap_ctr_access,
+	},
 #endif
 	{
 	}


@@ -8,6 +8,7 @@
  */
 
 #include <linux/compat.h>
+#include <linux/cpufeature.h>
 #include <linux/personality.h>
 #include <linux/sched.h>
 #include <linux/sched/signal.h>
@@ -17,6 +18,7 @@
 
 #include <asm/cacheflush.h>
 #include <asm/system_misc.h>
+#include <asm/tlbflush.h>
 #include <asm/unistd.h>
 
 static long
@@ -30,6 +32,15 @@ __do_compat_cache_op(unsigned long start, unsigned long end)
 		if (fatal_signal_pending(current))
 			return 0;
 
+		if (cpus_have_const_cap(ARM64_WORKAROUND_1542419)) {
+			/*
+			 * The workaround requires an inner-shareable tlbi.
+			 * We pick the reserved-ASID to minimise the impact.
+			 */
+			__tlbi(aside1is, __TLBI_VADDR(0, 0));
+			dsb(ish);
+		}
+
 		ret = __flush_cache_user_range(start, start + chunk);
 		if (ret)
 			return ret;


@@ -480,6 +480,15 @@ static void ctr_read_handler(unsigned int esr, struct pt_regs *regs)
 	int rt = ESR_ELx_SYS64_ISS_RT(esr);
 	unsigned long val = arm64_ftr_reg_user_value(&arm64_ftr_reg_ctrel0);
 
+	if (cpus_have_const_cap(ARM64_WORKAROUND_1542419)) {
+		/* Hide DIC so that we can trap the unnecessary maintenance...*/
+		val &= ~BIT(CTR_DIC_SHIFT);
+
+		/* ... and fake IminLine to reduce the number of traps. */
+		val &= ~CTR_IMINLINE_MASK;
+		val |= (PAGE_SHIFT - 2) & CTR_IMINLINE_MASK;
+	}
+
 	pt_regs_write_reg(regs, rt, val);
 
 	arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
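CTR_EL0.IminLine is log2 of the I-cache line size in 4-byte words, so faking it as PAGE_SHIFT - 2 advertises a "line" of exactly one page: user-space maintenance loops then take one trap per page instead of one per real cache line. A quick check of that arithmetic, assuming 4 KiB pages:

    #include <assert.h>

    int main(void)
    {
            unsigned int page_shift = 12;              /* 4 KiB pages */
            unsigned int iminline   = page_shift - 2;  /* faked field value */
            unsigned long stride    = 4UL << iminline; /* words are 4 bytes */

            assert(stride == 4096);  /* one IC IVAU (one trap) per page */
            return 0;
    }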


@@ -202,6 +202,13 @@ static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
 	}
 
 	memcpy((u32 *)regs + off, valp, KVM_REG_SIZE(reg->id));
+
+	if (*vcpu_cpsr(vcpu) & PSR_MODE32_BIT) {
+		int i;
+
+		for (i = 0; i < 16; i++)
+			*vcpu_reg32(vcpu, i) = (u32)*vcpu_reg32(vcpu, i);
+	}
 out:
 	return err;
 }


@@ -230,6 +230,8 @@ pte_t *huge_pte_alloc(struct mm_struct *mm,
 		ptep = (pte_t *)pudp;
 	} else if (sz == (PAGE_SIZE * CONT_PTES)) {
 		pmdp = pmd_alloc(mm, pudp, addr);
+		if (!pmdp)
+			return NULL;
 
 		WARN_ON(addr & (sz - 1));
 		/*


@@ -171,16 +171,10 @@ static inline void writel(u32 data, volatile void __iomem *addr)
 #define writew_relaxed __raw_writew
 #define writel_relaxed __raw_writel
 
-/*
- * Need an mtype somewhere in here, for cache type deals?
- * This is probably too long for an inline.
- */
-void __iomem *ioremap_nocache(unsigned long phys_addr, unsigned long size);
-static inline void __iomem *ioremap(unsigned long phys_addr, unsigned long size)
-{
-	return ioremap_nocache(phys_addr, size);
-}
+void __iomem *ioremap(unsigned long phys_addr, unsigned long size);
+#define ioremap_nocache ioremap
+#define ioremap_uc(X, Y) ioremap((X), (Y))
 
 static inline void iounmap(volatile void __iomem *addr)
 {


@@ -20,7 +20,7 @@ EXPORT_SYMBOL(__vmgetie);
 EXPORT_SYMBOL(__vmsetie);
 EXPORT_SYMBOL(__vmyield);
 EXPORT_SYMBOL(empty_zero_page);
-EXPORT_SYMBOL(ioremap_nocache);
+EXPORT_SYMBOL(ioremap);
 EXPORT_SYMBOL(memcpy);
 EXPORT_SYMBOL(memset);


@@ -9,7 +9,7 @@
 #include <linux/vmalloc.h>
 #include <linux/mm.h>
 
-void __iomem *ioremap_nocache(unsigned long phys_addr, unsigned long size)
+void __iomem *ioremap(unsigned long phys_addr, unsigned long size)
 {
 	unsigned long last_addr, addr;
 	unsigned long offset = phys_addr & ~PAGE_MASK;


@@ -697,7 +697,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_SPE)
 	stw	r10,_CCR(r1)
 	stw	r1,KSP(r3)	/* Set old stack pointer */
 
-	kuap_check r2, r4
+	kuap_check r2, r0
 #ifdef CONFIG_SMP
 	/* We need a sync somewhere here to make sure that if the
 	 * previous task gets rescheduled on another CPU, it sees all


@@ -541,6 +541,8 @@ static bool __init parse_cache_info(struct device_node *np,
 	lsizep = of_get_property(np, propnames[3], NULL);
 	if (bsizep == NULL)
 		bsizep = lsizep;
+	if (lsizep == NULL)
+		lsizep = bsizep;
 	if (lsizep != NULL)
 		lsize = be32_to_cpu(*lsizep);
 	if (bsizep != NULL)


@@ -522,35 +522,6 @@ static inline void clear_irq_work_pending(void)
 		"i" (offsetof(struct paca_struct, irq_work_pending)));
 }
 
-void arch_irq_work_raise(void)
-{
-	preempt_disable();
-	set_irq_work_pending_flag();
-	/*
-	 * Non-nmi code running with interrupts disabled will replay
-	 * irq_happened before it re-enables interrupts, so set the
-	 * decrementer there instead of causing a hardware exception
-	 * which would immediately hit the masked interrupt handler
-	 * and have the net effect of setting the decrementer in
-	 * irq_happened.
-	 *
-	 * NMI interrupts can not check this when they return, so the
-	 * decrementer hardware exception is raised, which will fire
-	 * when interrupts are next enabled.
-	 *
-	 * BookE does not support this yet, it must audit all NMI
-	 * interrupt handlers to ensure they call nmi_enter() so this
-	 * check would be correct.
-	 */
-	if (IS_ENABLED(CONFIG_BOOKE) || !irqs_disabled() || in_nmi()) {
-		set_dec(1);
-	} else {
-		hard_irq_disable();
-		local_paca->irq_happened |= PACA_IRQ_DEC;
-	}
-	preempt_enable();
-}
-
 #else /* 32-bit */
 
 DEFINE_PER_CPU(u8, irq_work_pending);
@@ -559,16 +530,27 @@ DEFINE_PER_CPU(u8, irq_work_pending);
 #define test_irq_work_pending()		__this_cpu_read(irq_work_pending)
 #define clear_irq_work_pending()	__this_cpu_write(irq_work_pending, 0)
 
-#endif /* 32 vs 64 bit */
-
 void arch_irq_work_raise(void)
 {
+	/*
+	 * 64-bit code that uses irq soft-mask can just cause an immediate
+	 * interrupt here that gets soft masked, if this is called under
+	 * local_irq_disable(). It might be possible to prevent that happening
+	 * by noticing interrupts are disabled and setting decrementer pending
+	 * to be replayed when irqs are enabled. The problem there is that
+	 * tracing can call irq_work_raise, including in code that does low
+	 * level manipulations of irq soft-mask state (e.g., trace_hardirqs_on)
+	 * which could get tangled up if we're messing with the same state
+	 * here.
+	 */
 	preempt_disable();
 	set_irq_work_pending_flag();
 	set_dec(1);
 	preempt_enable();
 }
 
+#endif /* 32 vs 64 bit */
+
 #else  /* CONFIG_IRQ_WORK */
 
 #define test_irq_work_pending()	0


@@ -379,7 +379,7 @@ config PPC_KUAP
 
 config PPC_KUAP_DEBUG
 	bool "Extra debugging for Kernel Userspace Access Protection"
-	depends on PPC_HAVE_KUAP && (PPC_RADIX_MMU || PPC_32)
+	depends on PPC_KUAP && (PPC_RADIX_MMU || PPC32)
 	help
 	  Add extra debugging for Kernel Userspace Access Protection (KUAP)
 	  If you're unsure, say N.


@@ -121,7 +121,8 @@ void __init setup_bootmem(void)
 	BUG_ON(mem_size == 0);
 
 	set_max_mapnr(PFN_DOWN(mem_size));
-	max_low_pfn = PFN_DOWN(memblock_end_of_DRAM());
+	max_pfn = PFN_DOWN(memblock_end_of_DRAM());
+	max_low_pfn = max_pfn;
 
 #ifdef CONFIG_BLK_DEV_INITRD
 	setup_initrd();


@@ -1927,6 +1927,9 @@ static int gfn_to_memslot_approx(struct kvm_memslots *slots, gfn_t gfn)
 			start = slot + 1;
 	}
 
+	if (start >= slots->used_slots)
+		return slots->used_slots - 1;
+
 	if (gfn >= memslots[start].base_gfn &&
 	    gfn < memslots[start].base_gfn + memslots[start].npages) {
 		atomic_set(&slots->lru_slot, start);


@@ -115,7 +115,6 @@ static struct irq_chip zpci_irq_chip = {
 	.name = "PCI-MSI",
 	.irq_unmask = pci_msi_unmask_irq,
 	.irq_mask = pci_msi_mask_irq,
-	.irq_set_affinity = zpci_set_irq_affinity,
 };
 
 static void zpci_handle_cpu_local_irq(bool rescan)
@@ -276,7 +275,9 @@ int arch_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
 		rc = -EIO;
 		if (hwirq - bit >= msi_vecs)
 			break;
-		irq = __irq_alloc_descs(-1, 0, 1, 0, THIS_MODULE, msi->affinity);
+		irq = __irq_alloc_descs(-1, 0, 1, 0, THIS_MODULE,
+				(irq_delivery == DIRECTED) ?
+				msi->affinity : NULL);
 		if (irq < 0)
 			return -ENOMEM;
 		rc = irq_set_msi_desc(irq, msi);


@@ -140,6 +140,7 @@ export CFLAGS_vmlinux := $(LINK-y) $(LINK_WRAPS) $(LD_FLAGS_CMDLINE)
 # When cleaning we don't include .config, so we don't include
 # TT or skas makefiles and don't clean skas_ptregs.h.
 CLEAN_FILES += linux x.i gmon.out
+MRPROPER_DIRS += arch/$(SUBARCH)/include/generated
 
 archclean:
 	@find . \( -name '*.bb' -o -name '*.bbg' -o -name '*.da' \


@@ -29,7 +29,7 @@ static int nhpoly1305_avx2_update(struct shash_desc *desc,
 		return crypto_nhpoly1305_update(desc, src, srclen);
 
 	do {
-		unsigned int n = min_t(unsigned int, srclen, PAGE_SIZE);
+		unsigned int n = min_t(unsigned int, srclen, SZ_4K);
 
 		kernel_fpu_begin();
 		crypto_nhpoly1305_update_helper(desc, src, n, _nh_avx2);


@@ -29,7 +29,7 @@ static int nhpoly1305_sse2_update(struct shash_desc *desc,
 		return crypto_nhpoly1305_update(desc, src, srclen);
 
 	do {
-		unsigned int n = min_t(unsigned int, srclen, PAGE_SIZE);
+		unsigned int n = min_t(unsigned int, srclen, SZ_4K);
 
 		kernel_fpu_begin();
 		crypto_nhpoly1305_update_helper(desc, src, n, _nh_sse2);


@@ -98,13 +98,6 @@ For 32-bit we have the following conventions - kernel is built with
 #define SIZEOF_PTREGS	21*8
 
 .macro PUSH_AND_CLEAR_REGS rdx=%rdx rax=%rax save_ret=0
-	/*
-	 * Push registers and sanitize registers of values that a
-	 * speculation attack might otherwise want to exploit. The
-	 * lower registers are likely clobbered well before they
-	 * could be put to use in a speculative execution gadget.
-	 * Interleave XOR with PUSH for better uop scheduling:
-	 */
 	.if \save_ret
 	pushq	%rsi		/* pt_regs->si */
 	movq	8(%rsp), %rsi	/* temporarily store the return address in %rsi */
@@ -114,34 +107,43 @@ For 32-bit we have the following conventions - kernel is built with
 	pushq   %rsi		/* pt_regs->si */
 	.endif
 	pushq	\rdx		/* pt_regs->dx */
-	xorl	%edx, %edx	/* nospec dx */
 	pushq   %rcx		/* pt_regs->cx */
-	xorl	%ecx, %ecx	/* nospec cx */
 	pushq   \rax		/* pt_regs->ax */
 	pushq   %r8		/* pt_regs->r8 */
-	xorl	%r8d, %r8d	/* nospec r8 */
 	pushq   %r9		/* pt_regs->r9 */
-	xorl	%r9d, %r9d	/* nospec r9 */
 	pushq   %r10		/* pt_regs->r10 */
-	xorl	%r10d, %r10d	/* nospec r10 */
 	pushq   %r11		/* pt_regs->r11 */
-	xorl	%r11d, %r11d	/* nospec r11 */
 	pushq	%rbx		/* pt_regs->rbx */
-	xorl	%ebx, %ebx	/* nospec rbx */
 	pushq	%rbp		/* pt_regs->rbp */
-	xorl	%ebp, %ebp	/* nospec rbp */
 	pushq	%r12		/* pt_regs->r12 */
-	xorl	%r12d, %r12d	/* nospec r12 */
 	pushq	%r13		/* pt_regs->r13 */
-	xorl	%r13d, %r13d	/* nospec r13 */
 	pushq	%r14		/* pt_regs->r14 */
-	xorl	%r14d, %r14d	/* nospec r14 */
 	pushq	%r15		/* pt_regs->r15 */
-	xorl	%r15d, %r15d	/* nospec r15 */
 	UNWIND_HINT_REGS
+
 	.if \save_ret
 	pushq	%rsi		/* return address on top of stack */
 	.endif
+
+	/*
+	 * Sanitize registers of values that a speculation attack might
+	 * otherwise want to exploit. The lower registers are likely clobbered
+	 * well before they could be put to use in a speculative execution
+	 * gadget.
+	 */
+	xorl	%edx,  %edx	/* nospec dx  */
+	xorl	%ecx,  %ecx	/* nospec cx  */
+	xorl	%r8d,  %r8d	/* nospec r8  */
+	xorl	%r9d,  %r9d	/* nospec r9  */
+	xorl	%r10d, %r10d	/* nospec r10 */
+	xorl	%r11d, %r11d	/* nospec r11 */
+	xorl	%ebx,  %ebx	/* nospec rbx */
+	xorl	%ebp,  %ebp	/* nospec rbp */
+	xorl	%r12d, %r12d	/* nospec r12 */
+	xorl	%r13d, %r13d	/* nospec r13 */
+	xorl	%r14d, %r14d	/* nospec r14 */
+	xorl	%r15d, %r15d	/* nospec r15 */
+
 .endm
 
 .macro POP_REGS pop_rdi=1 skip_r11rcx=0


@@ -249,7 +249,6 @@ GLOBAL(entry_SYSCALL_64_after_hwframe)
 	 */
 syscall_return_via_sysret:
 	/* rcx and r11 are already restored (see code above) */
-	UNWIND_HINT_EMPTY
 	POP_REGS pop_rdi=0 skip_r11rcx=1
 
 	/*
@@ -258,6 +257,7 @@ syscall_return_via_sysret:
 	 */
 	movq	%rsp, %rdi
 	movq	PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %rsp
+	UNWIND_HINT_EMPTY
 
 	pushq	RSP-RDI(%rdi)	/* RSP */
 	pushq	(%rdi)		/* RDI */
@@ -512,7 +512,7 @@ END(spurious_entries_start)
  * +----------------------------------------------------+
  */
 ENTRY(interrupt_entry)
-	UNWIND_HINT_FUNC
+	UNWIND_HINT_IRET_REGS offset=16
 	ASM_CLAC
 	cld
 
@@ -544,9 +544,9 @@ ENTRY(interrupt_entry)
 	pushq	5*8(%rdi)		/* regs->eflags */
 	pushq	4*8(%rdi)		/* regs->cs */
 	pushq	3*8(%rdi)		/* regs->ip */
+	UNWIND_HINT_IRET_REGS
 	pushq	2*8(%rdi)		/* regs->orig_ax */
 	pushq	8(%rdi)			/* return address */
-	UNWIND_HINT_FUNC
 
 	movq	(%rdi), %rdi
 	jmp	2f
@@ -637,6 +637,7 @@ GLOBAL(swapgs_restore_regs_and_return_to_usermode)
 	 */
 	movq	%rsp, %rdi
 	movq	PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %rsp
+	UNWIND_HINT_EMPTY
 
 	/* Copy the IRET frame to the trampoline stack. */
 	pushq	6*8(%rdi)	/* SS */
@@ -1734,7 +1735,7 @@ ENTRY(rewind_stack_do_exit)
 	movq	PER_CPU_VAR(cpu_current_top_of_stack), %rax
 	leaq	-PTREGS_SIZE(%rax), %rsp
-	UNWIND_HINT_FUNC sp_offset=PTREGS_SIZE
+	UNWIND_HINT_REGS
 
 	call	do_exit
 END(rewind_stack_do_exit)


@@ -1587,8 +1587,8 @@ void kvm_set_msi_irq(struct kvm *kvm, struct kvm_kernel_irq_routing_entry *e,
 static inline bool kvm_irq_is_postable(struct kvm_lapic_irq *irq)
 {
 	/* We can only post Fixed and LowPrio IRQs */
-	return (irq->delivery_mode == dest_Fixed ||
-		irq->delivery_mode == dest_LowestPrio);
+	return (irq->delivery_mode == APIC_DM_FIXED ||
+		irq->delivery_mode == APIC_DM_LOWEST);
 }
 
 static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu)


@@ -19,7 +19,7 @@ struct unwind_state {
 #if defined(CONFIG_UNWINDER_ORC)
 	bool signal, full_regs;
 	unsigned long sp, bp, ip;
-	struct pt_regs *regs;
+	struct pt_regs *regs, *prev_regs;
 #elif defined(CONFIG_UNWINDER_FRAME_POINTER)
 	bool got_irq;
 	unsigned long *bp, *orig_sp, ip;


@@ -220,8 +220,8 @@ static void __init ms_hyperv_init_platform(void)
 	ms_hyperv.misc_features = cpuid_edx(HYPERV_CPUID_FEATURES);
 	ms_hyperv.hints    = cpuid_eax(HYPERV_CPUID_ENLIGHTMENT_INFO);
 
-	pr_info("Hyper-V: features 0x%x, hints 0x%x\n",
-		ms_hyperv.features, ms_hyperv.hints);
+	pr_info("Hyper-V: features 0x%x, hints 0x%x, misc 0x%x\n",
+		ms_hyperv.features, ms_hyperv.hints, ms_hyperv.misc_features);
 
 	ms_hyperv.max_vp_index = cpuid_eax(HYPERV_CPUID_IMPLEMENT_LIMITS);
 	ms_hyperv.max_lp_index = cpuid_ebx(HYPERV_CPUID_IMPLEMENT_LIMITS);


@@ -142,9 +142,6 @@ static struct orc_entry *orc_find(unsigned long ip)
 {
 	static struct orc_entry *orc;
 
-	if (!orc_init)
-		return NULL;
-
 	if (ip == 0)
 		return &null_orc_entry;
 
@@ -314,12 +311,19 @@ EXPORT_SYMBOL_GPL(unwind_get_return_address);
 
 unsigned long *unwind_get_return_address_ptr(struct unwind_state *state)
 {
+	struct task_struct *task = state->task;
+
 	if (unwind_done(state))
 		return NULL;
 
 	if (state->regs)
 		return &state->regs->ip;
 
+	if (task != current && state->sp == task->thread.sp) {
+		struct inactive_task_frame *frame = (void *)task->thread.sp;
+		return &frame->ret_addr;
+	}
+
 	if (state->sp)
 		return (unsigned long *)state->sp - 1;
 
@@ -378,9 +382,38 @@ static bool deref_stack_iret_regs(struct unwind_state *state, unsigned long addr
 	return true;
 }
 
+/*
+ * If state->regs is non-NULL, and points to a full pt_regs, just get the reg
+ * value from state->regs.
+ *
+ * Otherwise, if state->regs just points to IRET regs, and the previous frame
+ * had full regs, it's safe to get the value from the previous regs.  This can
+ * happen when early/late IRQ entry code gets interrupted by an NMI.
+ */
+static bool get_reg(struct unwind_state *state, unsigned int reg_off,
+		    unsigned long *val)
+{
+	unsigned int reg = reg_off/8;
+
+	if (!state->regs)
+		return false;
+
+	if (state->full_regs) {
+		*val = ((unsigned long *)state->regs)[reg];
+		return true;
+	}
+
+	if (state->prev_regs) {
+		*val = ((unsigned long *)state->prev_regs)[reg];
+		return true;
+	}
+
+	return false;
+}
+
 bool unwind_next_frame(struct unwind_state *state)
 {
-	unsigned long ip_p, sp, orig_ip = state->ip, prev_sp = state->sp;
+	unsigned long ip_p, sp, tmp, orig_ip = state->ip, prev_sp = state->sp;
 	enum stack_type prev_type = state->stack_info.type;
 	struct orc_entry *orc;
 	bool indirect = false;
@@ -442,39 +475,35 @@ bool unwind_next_frame(struct unwind_state *state)
 		break;
 
 	case ORC_REG_R10:
-		if (!state->regs || !state->full_regs) {
+		if (!get_reg(state, offsetof(struct pt_regs, r10), &sp)) {
 			orc_warn("missing regs for base reg R10 at ip %pB\n",
 				 (void *)state->ip);
 			goto err;
 		}
-		sp = state->regs->r10;
 		break;
 
 	case ORC_REG_R13:
-		if (!state->regs || !state->full_regs) {
+		if (!get_reg(state, offsetof(struct pt_regs, r13), &sp)) {
 			orc_warn("missing regs for base reg R13 at ip %pB\n",
 				 (void *)state->ip);
 			goto err;
 		}
-		sp = state->regs->r13;
 		break;
 
 	case ORC_REG_DI:
-		if (!state->regs || !state->full_regs) {
+		if (!get_reg(state, offsetof(struct pt_regs, di), &sp)) {
 			orc_warn("missing regs for base reg DI at ip %pB\n",
 				 (void *)state->ip);
 			goto err;
 		}
-		sp = state->regs->di;
 		break;
 
 	case ORC_REG_DX:
-		if (!state->regs || !state->full_regs) {
+		if (!get_reg(state, offsetof(struct pt_regs, dx), &sp)) {
 			orc_warn("missing regs for base reg DX at ip %pB\n",
 				 (void *)state->ip);
 			goto err;
 		}
-		sp = state->regs->dx;
 		break;
 
 	default:
@@ -501,6 +530,7 @@ bool unwind_next_frame(struct unwind_state *state)
 
 		state->sp = sp;
 		state->regs = NULL;
+		state->prev_regs = NULL;
 		state->signal = false;
 		break;
 
@@ -512,6 +542,7 @@ bool unwind_next_frame(struct unwind_state *state)
 		}
 
 		state->regs = (struct pt_regs *)sp;
+		state->prev_regs = NULL;
 		state->full_regs = true;
 		state->signal = true;
 		break;
 
@@ -523,6 +554,8 @@ bool unwind_next_frame(struct unwind_state *state)
 			goto err;
 		}
 
+		if (state->full_regs)
+			state->prev_regs = state->regs;
 		state->regs = (void *)sp - IRET_FRAME_OFFSET;
 		state->full_regs = false;
 		state->signal = true;
@@ -531,14 +564,14 @@ bool unwind_next_frame(struct unwind_state *state)
 	default:
 		orc_warn("unknown .orc_unwind entry type %d for ip %pB\n",
 			 orc->type, (void *)orig_ip);
-		break;
+		goto err;
 	}
 
 	/* Find BP: */
 	switch (orc->bp_reg) {
 	case ORC_REG_UNDEFINED:
-		if (state->regs && state->full_regs)
-			state->bp = state->regs->bp;
+		if (get_reg(state, offsetof(struct pt_regs, bp), &tmp))
+			state->bp = tmp;
 		break;
 
 	case ORC_REG_PREV_SP:
@@ -585,17 +618,20 @@ void __unwind_start(struct unwind_state *state, struct task_struct *task,
 	memset(state, 0, sizeof(*state));
 	state->task = task;
 
+	if (!orc_init)
+		goto err;
+
 	/*
 	 * Refuse to unwind the stack of a task while it's executing on another
 	 * CPU.  This check is racy, but that's ok: the unwinder has other
 	 * checks to prevent it from going off the rails.
 	 */
 	if (task_on_another_cpu(task))
-		goto done;
+		goto err;
 
 	if (regs) {
 		if (user_mode(regs))
-			goto done;
+			goto the_end;
 
 		state->ip = regs->ip;
 		state->sp = regs->sp;
@@ -628,6 +664,7 @@ void __unwind_start(struct unwind_state *state, struct task_struct *task,
 	 * generate some kind of backtrace if this happens.
 	 */
 	void *next_page = (void *)PAGE_ALIGN((unsigned long)state->sp);
+	state->error = true;
 	if (get_stack_info(next_page, state->task, &state->stack_info,
 			   &state->stack_mask))
 		return;
@@ -648,13 +685,14 @@ void __unwind_start(struct unwind_state *state, struct task_struct *task,
 	/* Otherwise, skip ahead to the user-specified starting frame: */
 	while (!unwind_done(state) &&
 	       (!on_stack(&state->stack_info, first_frame, sizeof(long)) ||
-			state->sp <= (unsigned long)first_frame))
+			state->sp < (unsigned long)first_frame))
 		unwind_next_frame(state);
 
 	return;
 
-done:
+err:
+	state->error = true;
+
+the_end:
 	state->stack_info.type = STACK_TYPE_UNKNOWN;
-	return;
 }
 EXPORT_SYMBOL_GPL(__unwind_start);
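The get_reg() helper above leans on struct pt_regs being, in effect, an array of uniform 8-byte slots, so offsetof(...)/8 turns a field offset into an index that works identically for the current and the saved previous register set. A stand-alone illustration of that idiom with a mock structure (not the kernel's pt_regs):

    #include <assert.h>
    #include <stddef.h>

    struct mock_regs {           /* stand-in for pt_regs: uniform 8-byte slots */
            unsigned long r15, r14, r13, r12, bp, bx;
            unsigned long r11, r10, r9, r8, ax, cx, dx, si, di;
    };

    static unsigned long read_slot(const struct mock_regs *regs, size_t off)
    {
            /* Same trick as get_reg(): byte offset -> slot index. */
            return ((const unsigned long *)regs)[off / 8];
    }

    int main(void)
    {
            struct mock_regs regs = { .r10 = 0xdead, .bp = 0xbeef };

            assert(read_slot(&regs, offsetof(struct mock_regs, r10)) == 0xdead);
            assert(read_slot(&regs, offsetof(struct mock_regs, bp)) == 0xbeef);
            return 0;
    }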


@@ -1853,7 +1853,7 @@ static struct page **sev_pin_memory(struct kvm *kvm, unsigned long uaddr,
 		return NULL;
 
 	/* Pin the user virtual address. */
-	npinned = get_user_pages_fast(uaddr, npages, FOLL_WRITE, pages);
+	npinned = get_user_pages_fast(uaddr, npages, write ? FOLL_WRITE : 0, pages);
 	if (npinned != npages) {
 		pr_err("SEV: Failure locking %lu pages.\n", npages);
 		goto err;


@@ -86,6 +86,9 @@ ENTRY(vmx_vmexit)
 	/* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET! */
 	FILL_RETURN_BUFFER %_ASM_AX, RSB_CLEAR_LOOPS, X86_FEATURE_RETPOLINE
 
+	/* Clear RFLAGS.CF and RFLAGS.ZF to preserve VM-Exit, i.e. !VM-Fail. */
+	or	$1, %_ASM_AX
+
 	pop	%_ASM_AX
 .Lvmexit_skip_rsb:
 #endif


@@ -2832,13 +2832,12 @@ void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
 			spin_unlock(&to_kvm_vmx(kvm)->ept_pointer_lock);
 		}
 
-		/* Loading vmcs02.GUEST_CR3 is handled by nested VM-Enter. */
-		if (is_guest_mode(vcpu))
-			update_guest_cr3 = false;
-		else if (enable_unrestricted_guest || is_paging(vcpu))
-			guest_cr3 = kvm_read_cr3(vcpu);
-		else
+		if (!enable_unrestricted_guest && !is_paging(vcpu))
 			guest_cr3 = to_kvm_vmx(kvm)->ept_identity_map_addr;
+		else if (test_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail))
+			guest_cr3 = vcpu->arch.cr3;
+		else /* vmcs01.GUEST_CR3 is already up-to-date. */
+			update_guest_cr3 = false;
 		ept_load_pdptrs(vcpu);
 	}
 
@@ -4409,7 +4408,7 @@ static int handle_rmode_exception(struct kvm_vcpu *vcpu,
  */
 static void kvm_machine_check(void)
 {
-#if defined(CONFIG_X86_MCE) && defined(CONFIG_X86_64)
+#if defined(CONFIG_X86_MCE)
 	struct pt_regs regs = {
 		.cs = 3, /* Fake ring 3 no matter what the guest ran on */
 		.flags = X86_EFLAGS_IF,


@@ -7035,25 +7035,16 @@ static void kvm_set_mmio_spte_mask(void)
 	u64 mask;
 
 	/*
-	 * Set the reserved bits and the present bit of an paging-structure
-	 * entry to generate page fault with PFER.RSV = 1.
+	 * Set a reserved PA bit in MMIO SPTEs to generate page faults with
+	 * PFEC.RSVD=1 on MMIO accesses.  64-bit PTEs (PAE, x86-64, and EPT
+	 * paging) support a maximum of 52 bits of PA, i.e. if the CPU supports
+	 * 52-bit physical addresses then there are no reserved PA bits in the
+	 * PTEs and so the reserved PA approach must be disabled.
 	 */
-
-	/*
-	 * Mask the uppermost physical address bit, which would be reserved as
-	 * long as the supported physical address width is less than 52.
-	 */
-	mask = 1ull << 51;
-
-	/* Set the present bit. */
-	mask |= 1ull;
-
-	/*
-	 * If reserved bit is not supported, clear the present bit to disable
-	 * mmio page fault.
-	 */
-	if (shadow_phys_bits == 52)
-		mask &= ~1ull;
+	if (shadow_phys_bits < 52)
+		mask = BIT_ULL(51) | PT_PRESENT_MASK;
+	else
+		mask = 0;
 
 	kvm_mmu_set_mmio_spte_mask(mask, mask, ACC_WRITE_MASK | ACC_USER_MASK);
 }


@@ -138,6 +138,19 @@ static bool is_ereg(u32 reg)
 			     BIT(BPF_REG_AX));
 }
 
+/*
+ * is_ereg_8l() == true if BPF register 'reg' is mapped to access x86-64
+ * lower 8-bit registers dil,sil,bpl,spl,r8b..r15b, which need extra byte
+ * of encoding. al,cl,dl,bl have simpler encoding.
+ */
+static bool is_ereg_8l(u32 reg)
+{
+	return is_ereg(reg) ||
+	    (1 << reg) & (BIT(BPF_REG_1) |
+			  BIT(BPF_REG_2) |
+			  BIT(BPF_REG_FP));
+}
+
 static bool is_axreg(u32 reg)
 {
 	return reg == BPF_REG_0;
@@ -747,9 +760,8 @@ st:			if (is_imm8(insn->off))
 			/* STX: *(u8*)(dst_reg + off) = src_reg */
 		case BPF_STX | BPF_MEM | BPF_B:
 			/* Emit 'mov byte ptr [rax + off], al' */
-			if (is_ereg(dst_reg) || is_ereg(src_reg) ||
-			    /* We have to add extra byte for x86 SIL, DIL regs */
-			    src_reg == BPF_REG_1 || src_reg == BPF_REG_2)
+			if (is_ereg(dst_reg) || is_ereg_8l(src_reg))
+				/* Add extra byte for eregs or SIL,DIL,BPL in src_reg */
 				EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x88);
 			else
 				EMIT1(0x88);
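The point of is_ereg_8l(): without a REX prefix, x86-64 byte-register encodings 4-7 mean ah/ch/dh/bh, so storing from sil, dil or bpl (where BPF_REG_1, BPF_REG_2 and BPF_REG_FP live) needs an empty REX byte (0x40) to select the low byte instead. A small illustration of the resulting machine code, with the bytes written out by hand:

    /* 'mov %al,(%rdi)' needs no prefix; 'mov %sil,(%rdi)' without REX would
     * instead encode 'mov %dh,(%rdi)', so the JIT emits a bare 0x40 prefix. */
    static const unsigned char mov_al_mem[]  = { 0x88, 0x07 };       /* mov %al,(%rdi)  */
    static const unsigned char mov_sil_mem[] = { 0x40, 0x88, 0x37 }; /* mov %sil,(%rdi) */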


@@ -2056,7 +2056,9 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
 					      STACK_VAR(dst_hi));
 				EMIT(0x0, 4);
 			} else {
-				EMIT3(0xC7, add_1reg(0xC0, dst_hi), 0);
+				/* xor dst_hi,dst_hi */
+				EMIT2(0x33,
+				      add_2reg(0xC0, dst_hi, dst_hi));
 			}
 			break;
 		case BPF_DW:
@@ -2157,6 +2159,13 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
 				      add_2reg(0x40, IA32_EBP,
 					       IA32_EDX),
 				      STACK_VAR(dst_hi));
+		} else {
+			/* mov dreg_lo,dst_lo */
+			EMIT2(0x89, add_2reg(0xC0, dreg_lo, dst_lo));
+			if (is_jmp64)
+				/* mov dreg_hi,dst_hi */
+				EMIT2(0x89,
+				      add_2reg(0xC0, dreg_hi, dst_hi));
 		}
 
 		if (sstk) {
@@ -2215,8 +2224,8 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
 	case BPF_JMP | BPF_JSET | BPF_X:
 	case BPF_JMP32 | BPF_JSET | BPF_X: {
 		bool is_jmp64 = BPF_CLASS(insn->code) == BPF_JMP;
-		u8 dreg_lo = dstk ? IA32_EAX : dst_lo;
-		u8 dreg_hi = dstk ? IA32_EDX : dst_hi;
+		u8 dreg_lo = IA32_EAX;
+		u8 dreg_hi = IA32_EDX;
 		u8 sreg_lo = sstk ? IA32_ECX : src_lo;
 		u8 sreg_hi = sstk ? IA32_EBX : src_hi;
 
@@ -2252,8 +2261,8 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
 	case BPF_JMP | BPF_JSET | BPF_K:
 	case BPF_JMP32 | BPF_JSET | BPF_K: {
 		bool is_jmp64 = BPF_CLASS(insn->code) == BPF_JMP;
-		u8 dreg_lo = dstk ? IA32_EAX : dst_lo;
-		u8 dreg_hi = dstk ? IA32_EDX : dst_hi;
+		u8 dreg_lo = IA32_EAX;
+		u8 dreg_hi = IA32_EDX;
 		u8 sreg_lo = IA32_ECX;
 		u8 sreg_hi = IA32_EBX;
 		u32 hi;
@@ -2266,6 +2275,13 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
 				      add_2reg(0x40, IA32_EBP,
 					       IA32_EDX),
 				      STACK_VAR(dst_hi));
+		} else {
+			/* mov dreg_lo,dst_lo */
+			EMIT2(0x89, add_2reg(0xC0, dreg_lo, dst_lo));
+			if (is_jmp64)
+				/* mov dreg_hi,dst_hi */
+				EMIT2(0x89,
+				      add_2reg(0xC0, dreg_hi, dst_hi));
 		}
 
 		/* mov ecx,imm32 */


@@ -1224,8 +1224,10 @@ bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list,
 		rq = list_first_entry(list, struct request, queuelist);
 
 		hctx = rq->mq_hctx;
-		if (!got_budget && !blk_mq_get_dispatch_budget(hctx))
+		if (!got_budget && !blk_mq_get_dispatch_budget(hctx)) {
+			blk_mq_put_driver_tag(rq);
 			break;
+		}
 
 		if (!blk_mq_get_driver_tag(rq)) {
 			/*


@@ -220,13 +220,13 @@ int acpi_device_set_power(struct acpi_device *device, int state)
  end:
 	if (result) {
 		dev_warn(&device->dev, "Failed to change power state to %s\n",
-			 acpi_power_state_string(state));
+			 acpi_power_state_string(target_state));
 	} else {
 		device->power.state = target_state;
 		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
 				  "Device [%s] transitioned to %s\n",
 				  device->pnp.bus_id,
-				  acpi_power_state_string(state)));
+				  acpi_power_state_string(target_state)));
 	}
 
 	return result;


@@ -739,7 +739,7 @@ static bool dpm_async_fn(struct device *dev, async_func_t func)
 
 	if (is_async(dev)) {
 		get_device(dev);
-		async_schedule(func, dev);
+		async_schedule_dev(func, dev);
 		return true;
 	}


@@ -435,11 +435,12 @@ static int lo_fallocate(struct loop_device *lo, struct request *rq, loff_t pos,
 	 * information.
 	 */
 	struct file *file = lo->lo_backing_file;
+	struct request_queue *q = lo->lo_queue;
 	int ret;
 
 	mode |= FALLOC_FL_KEEP_SIZE;
 
-	if ((!file->f_op->fallocate) || lo->lo_encrypt_key_size) {
+	if (!blk_queue_discard(q)) {
 		ret = -EOPNOTSUPP;
 		goto out;
 	}
@@ -889,28 +890,47 @@ static void loop_config_discard(struct loop_device *lo)
 	struct inode *inode = file->f_mapping->host;
 	struct request_queue *q = lo->lo_queue;
 
+	/*
+	 * If the backing device is a block device, mirror its zeroing
+	 * capability. Set the discard sectors to the block device's zeroing
+	 * capabilities because loop discards result in blkdev_issue_zeroout(),
+	 * not blkdev_issue_discard(). This maintains consistent behavior with
+	 * file-backed loop devices: discarded regions read back as zero.
+	 */
+	if (S_ISBLK(inode->i_mode) && !lo->lo_encrypt_key_size) {
+		struct request_queue *backingq;
+
+		backingq = bdev_get_queue(inode->i_bdev);
+		blk_queue_max_discard_sectors(q,
+			backingq->limits.max_write_zeroes_sectors);
+
+		blk_queue_max_write_zeroes_sectors(q,
+			backingq->limits.max_write_zeroes_sectors);
+
 	/*
 	 * We use punch hole to reclaim the free space used by the
 	 * image a.k.a. discard. However we do not support discard if
 	 * encryption is enabled, because it may give an attacker
 	 * useful information.
 	 */
-	if ((!file->f_op->fallocate) ||
-	    lo->lo_encrypt_key_size) {
+	} else if (!file->f_op->fallocate || lo->lo_encrypt_key_size) {
 		q->limits.discard_granularity = 0;
 		q->limits.discard_alignment = 0;
 		blk_queue_max_discard_sectors(q, 0);
 		blk_queue_max_write_zeroes_sectors(q, 0);
-		blk_queue_flag_clear(QUEUE_FLAG_DISCARD, q);
-		return;
-	}
 
-	q->limits.discard_granularity = inode->i_sb->s_blocksize;
-	q->limits.discard_alignment = 0;
+	} else {
+		q->limits.discard_granularity = inode->i_sb->s_blocksize;
+		q->limits.discard_alignment = 0;
 
-	blk_queue_max_discard_sectors(q, UINT_MAX >> 9);
-	blk_queue_max_write_zeroes_sectors(q, UINT_MAX >> 9);
-	blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
+		blk_queue_max_discard_sectors(q, UINT_MAX >> 9);
+		blk_queue_max_write_zeroes_sectors(q, UINT_MAX >> 9);
+	}
+
+	if (q->limits.max_write_zeroes_sectors)
+		blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
+	else
+		blk_queue_flag_clear(QUEUE_FLAG_DISCARD, q);
 }
 
 static void loop_unprepare_queue(struct loop_device *lo)
static void loop_unprepare_queue(struct loop_device *lo) static void loop_unprepare_queue(struct loop_device *lo)


@@ -345,9 +345,14 @@ static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
 		if (err == -ENOSPC)
 			blk_mq_stop_hw_queue(hctx);
 		spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
-		if (err == -ENOMEM || err == -ENOSPC)
+		switch (err) {
+		case -ENOSPC:
 			return BLK_STS_DEV_RESOURCE;
-		return BLK_STS_IOERR;
+		case -ENOMEM:
+			return BLK_STS_RESOURCE;
+		default:
+			return BLK_STS_IOERR;
+		}
 	}
 
 	if (bd->last && virtqueue_kick_prepare(vblk->vqs[qid].vq))


@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (C) 2012 IBM Corporation
+ * Copyright (C) 2012-2020 IBM Corporation
  *
  * Author: Ashley Lai <ashleydlai@gmail.com>
  *
@@ -133,6 +133,64 @@ static int tpm_ibmvtpm_recv(struct tpm_chip *chip, u8 *buf, size_t count)
 	return len;
 }
 
+/**
+ * ibmvtpm_crq_send_init - Send a CRQ initialize message
+ * @ibmvtpm:	vtpm device struct
+ *
+ * Return:
+ *	0 on success.
+ *	Non-zero on failure.
+ */
+static int ibmvtpm_crq_send_init(struct ibmvtpm_dev *ibmvtpm)
+{
+	int rc;
+
+	rc = ibmvtpm_send_crq_word(ibmvtpm->vdev, INIT_CRQ_CMD);
+	if (rc != H_SUCCESS)
+		dev_err(ibmvtpm->dev,
+			"%s failed rc=%d\n", __func__, rc);
+
+	return rc;
+}
+
+/**
+ * tpm_ibmvtpm_resume - Resume from suspend
+ *
+ * @dev:	device struct
+ *
+ * Return: Always 0.
+ */
+static int tpm_ibmvtpm_resume(struct device *dev)
+{
+	struct tpm_chip *chip = dev_get_drvdata(dev);
+	struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev);
+	int rc = 0;
+
+	do {
+		if (rc)
+			msleep(100);
+		rc = plpar_hcall_norets(H_ENABLE_CRQ,
+					ibmvtpm->vdev->unit_address);
+	} while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc));
+
+	if (rc) {
+		dev_err(dev, "Error enabling ibmvtpm rc=%d\n", rc);
+		return rc;
+	}
+
+	rc = vio_enable_interrupts(ibmvtpm->vdev);
+	if (rc) {
+		dev_err(dev, "Error vio_enable_interrupts rc=%d\n", rc);
+		return rc;
+	}
+
+	rc = ibmvtpm_crq_send_init(ibmvtpm);
+	if (rc)
+		dev_err(dev, "Error send_init rc=%d\n", rc);
+
+	return rc;
+}
+
 /**
  * tpm_ibmvtpm_send() - Send a TPM command
  * @chip:	tpm chip struct
@@ -146,6 +204,7 @@ static int tpm_ibmvtpm_recv(struct tpm_chip *chip, u8 *buf, size_t count)
 static int tpm_ibmvtpm_send(struct tpm_chip *chip, u8 *buf, size_t count)
 {
 	struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev);
+	bool retry = true;
 	int rc, sig;
 
 	if (!ibmvtpm->rtce_buf) {
@@ -179,18 +238,27 @@ static int tpm_ibmvtpm_send(struct tpm_chip *chip, u8 *buf, size_t count)
 	 */
 	ibmvtpm->tpm_processing_cmd = true;
 
+again:
 	rc = ibmvtpm_send_crq(ibmvtpm->vdev,
 			IBMVTPM_VALID_CMD, VTPM_TPM_COMMAND,
 			count, ibmvtpm->rtce_dma_handle);
 	if (rc != H_SUCCESS) {
+		/*
+		 * H_CLOSED can be returned after LPM resume.  Call
+		 * tpm_ibmvtpm_resume() to re-enable the CRQ then retry
+		 * ibmvtpm_send_crq() once before failing.
+		 */
+		if (rc == H_CLOSED && retry) {
+			tpm_ibmvtpm_resume(ibmvtpm->dev);
+			retry = false;
+			goto again;
+		}
 		dev_err(ibmvtpm->dev, "tpm_ibmvtpm_send failed rc=%d\n", rc);
-		rc = 0;
 		ibmvtpm->tpm_processing_cmd = false;
-	} else
-		rc = 0;
+	}
 
 	spin_unlock(&ibmvtpm->rtce_lock);
-	return rc;
+	return 0;
 }
 
 static void tpm_ibmvtpm_cancel(struct tpm_chip *chip)
@@ -268,26 +336,6 @@ static int ibmvtpm_crq_send_init_complete(struct ibmvtpm_dev *ibmvtpm)
 	return rc;
 }
 
-/**
- * ibmvtpm_crq_send_init - Send a CRQ initialize message
- * @ibmvtpm:	vtpm device struct
- *
- * Return:
- *	0 on success.
- *	Non-zero on failure.
- */
-static int ibmvtpm_crq_send_init(struct ibmvtpm_dev *ibmvtpm)
-{
-	int rc;
-
-	rc = ibmvtpm_send_crq_word(ibmvtpm->vdev, INIT_CRQ_CMD);
-	if (rc != H_SUCCESS)
-		dev_err(ibmvtpm->dev,
-			"ibmvtpm_crq_send_init failed rc=%d\n", rc);
-
-	return rc;
-}
-
 /**
  * tpm_ibmvtpm_remove - ibm vtpm remove entry point
  * @vdev:	vio device struct
@@ -400,44 +448,6 @@ static int ibmvtpm_reset_crq(struct ibmvtpm_dev *ibmvtpm)
 			      ibmvtpm->crq_dma_handle, CRQ_RES_BUF_SIZE);
 }
 
-/**
- * tpm_ibmvtpm_resume - Resume from suspend
- *
- * @dev:	device struct
- *
- * Return: Always 0.
- */
-static int tpm_ibmvtpm_resume(struct device *dev)
-{
-	struct tpm_chip *chip = dev_get_drvdata(dev);
-	struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev);
-	int rc = 0;
-
-	do {
-		if (rc)
-			msleep(100);
-		rc = plpar_hcall_norets(H_ENABLE_CRQ,
-					ibmvtpm->vdev->unit_address);
-	} while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc));
-
-	if (rc) {
-		dev_err(dev, "Error enabling ibmvtpm rc=%d\n", rc);
-		return rc;
-	}
-
-	rc = vio_enable_interrupts(ibmvtpm->vdev);
-	if (rc) {
-		dev_err(dev, "Error vio_enable_interrupts rc=%d\n", rc);
-		return rc;
-	}
-
-	rc = ibmvtpm_crq_send_init(ibmvtpm);
-	if (rc)
-		dev_err(dev, "Error send_init rc=%d\n", rc);
-
-	return rc;
-}
-
 static bool tpm_ibmvtpm_req_canceled(struct tpm_chip *chip, u8 status)
 {
 	return (status == 0);
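The H_CLOSED handling above is a retry-once pattern: a boolean armed before the first attempt guards the goto, so the recovery path (re-enabling the CRQ) runs at most one time and cannot loop. The shape of it, reduced to a sketch with hypothetical names:

    #include <errno.h>
    #include <stdbool.h>

    static int do_send(void);        /* hypothetical transport call */
    static void recover_link(void);  /* e.g. re-enable the queue */

    static int send_with_recovery(void)
    {
            bool retry = true;
            int rc;

    again:
            rc = do_send();
            if (rc == -ECONNRESET && retry) {
                    recover_link();
                    retry = false;   /* disarm: at most one more attempt */
                    goto again;
            }
            return rc;
    }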


@@ -433,6 +433,9 @@ static void disable_interrupts(struct tpm_chip *chip)
 	u32 intmask;
 	int rc;
 
+	if (priv->irq == 0)
+		return;
+
 	rc = tpm_tis_read32(priv, TPM_INT_ENABLE(priv->locality), &intmask);
 	if (rc < 0)
 		intmask = 0;
@@ -984,9 +987,12 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq,
 		if (irq) {
 			tpm_tis_probe_irq_single(chip, intmask, IRQF_SHARED,
 						 irq);
-			if (!(chip->flags & TPM_CHIP_FLAG_IRQ))
+			if (!(chip->flags & TPM_CHIP_FLAG_IRQ)) {
 				dev_err(&chip->dev, FW_BUG
 					"TPM interrupt not working, polling instead\n");
+
+				disable_interrupts(chip);
+			}
 		} else {
 			tpm_tis_probe_irq(chip, intmask);
 		}


@@ -42,6 +42,7 @@ MODULE_PARM_DESC(base, "ACCES 104-QUAD-8 base addresses");
  * @base:	base port address of the IIO device
  */
 struct quad8_iio {
+	struct mutex lock;
 	struct counter_device counter;
 	unsigned int preset[QUAD8_NUM_COUNTERS];
 	unsigned int count_mode[QUAD8_NUM_COUNTERS];
@@ -116,6 +117,8 @@ static int quad8_read_raw(struct iio_dev *indio_dev,
 		/* Borrow XOR Carry effectively doubles count range */
 		*val = (borrow ^ carry) << 24;
 
+		mutex_lock(&priv->lock);
+
 		/* Reset Byte Pointer; transfer Counter to Output Latch */
 		outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_BP | QUAD8_RLD_CNTR_OUT,
 		     base_offset + 1);
@@ -123,6 +126,8 @@ static int quad8_read_raw(struct iio_dev *indio_dev,
 		for (i = 0; i < 3; i++)
 			*val |= (unsigned int)inb(base_offset) << (8 * i);
 
+		mutex_unlock(&priv->lock);
+
 		return IIO_VAL_INT;
 	case IIO_CHAN_INFO_ENABLE:
 		*val = priv->ab_enable[chan->channel];
@@ -153,6 +158,8 @@ static int quad8_write_raw(struct iio_dev *indio_dev,
 		if ((unsigned int)val > 0xFFFFFF)
 			return -EINVAL;
 
+		mutex_lock(&priv->lock);
+
 		/* Reset Byte Pointer */
 		outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_BP, base_offset + 1);
 
@@ -176,12 +183,16 @@ static int quad8_write_raw(struct iio_dev *indio_dev,
 		/* Reset Error flag */
 		outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_E, base_offset + 1);
 
+		mutex_unlock(&priv->lock);
+
 		return 0;
 	case IIO_CHAN_INFO_ENABLE:
 		/* only boolean values accepted */
 		if (val < 0 || val > 1)
 			return -EINVAL;
 
+		mutex_lock(&priv->lock);
+
 		priv->ab_enable[chan->channel] = val;
 
 		ior_cfg = val | priv->preset_enable[chan->channel] << 1;
@@ -189,11 +200,18 @@ static int quad8_write_raw(struct iio_dev *indio_dev,
 		/* Load I/O control configuration */
 		outb(QUAD8_CTR_IOR | ior_cfg, base_offset + 1);
 
+		mutex_unlock(&priv->lock);
+
 		return 0;
 	case IIO_CHAN_INFO_SCALE:
+		mutex_lock(&priv->lock);
+
 		/* Quadrature scaling only available in quadrature mode */
-		if (!priv->quadrature_mode[chan->channel] && (val2 || val != 1))
+		if (!priv->quadrature_mode[chan->channel] &&
+		    (val2 || val != 1)) {
+			mutex_unlock(&priv->lock);
 			return -EINVAL;
+		}
 
 		/* Only three gain states (1, 0.5, 0.25) */
 		if (val == 1 && !val2)
@@ -207,11 +225,15 @@ static int quad8_write_raw(struct iio_dev *indio_dev,
 				priv->quadrature_scale[chan->channel] = 2;
 				break;
 			default:
+				mutex_unlock(&priv->lock);
 				return -EINVAL;
 			}
-		else
+		else {
+			mutex_unlock(&priv->lock);
 			return -EINVAL;
+		}
 
+		mutex_unlock(&priv->lock);
 		return 0;
 	}
 
@@ -248,6 +270,8 @@ static ssize_t quad8_write_preset(struct iio_dev *indio_dev, uintptr_t private,
 	if (preset > 0xFFFFFF)
 		return -EINVAL;
 
+	mutex_lock(&priv->lock);
+
 	priv->preset[chan->channel] = preset;
 
 	/* Reset Byte Pointer */
@@ -257,6 +281,8 @@ static ssize_t quad8_write_preset(struct iio_dev *indio_dev, uintptr_t private,
 	for (i = 0; i < 3; i++)
 		outb(preset >> (8 * i), base_offset);
 
+	mutex_unlock(&priv->lock);
+
 	return len;
 }
 
@@ -286,6 +312,8 @@ static ssize_t quad8_write_set_to_preset_on_index(struct iio_dev *indio_dev,
 	/* Preset enable is active low in Input/Output Control register */
 	preset_enable = !preset_enable;
 
+	mutex_lock(&priv->lock);
+
 	priv->preset_enable[chan->channel] = preset_enable;
 
 	ior_cfg = priv->ab_enable[chan->channel] |
@@ -294,6 +322,8 @@ static ssize_t quad8_write_set_to_preset_on_index(struct iio_dev *indio_dev,
 	/* Load I/O control configuration to Input / Output Control Register */
 	outb(QUAD8_CTR_IOR | ior_cfg, base_offset);
 
+	mutex_unlock(&priv->lock);
+
 	return len;
 }
 
@@ -351,6 +381,8 @@ static int quad8_set_count_mode(struct iio_dev *indio_dev,
 	unsigned int mode_cfg = cnt_mode << 1;
 	const int base_offset = priv->base + 2 * chan->channel + 1;
 
+	mutex_lock(&priv->lock);
+
 	priv->count_mode[chan->channel] = cnt_mode;
 
 	/* Add quadrature mode configuration */
@@ -360,6 +392,8 @@ static int quad8_set_count_mode(struct iio_dev *indio_dev,
 	/* Load mode configuration to Counter Mode Register */
 	outb(QUAD8_CTR_CMR | mode_cfg, base_offset);
 
+	mutex_unlock(&priv->lock);
+
 	return 0;
 }
 
@@ -387,19 +421,26 @@ static int quad8_set_synchronous_mode(struct iio_dev *indio_dev,
 	const struct iio_chan_spec *chan, unsigned int synchronous_mode)
 {
 	struct quad8_iio *const priv = iio_priv(indio_dev);
-	const unsigned int idr_cfg = synchronous_mode |
-		priv->index_polarity[chan->channel] << 1;
 	const int base_offset = priv->base + 2 * chan->channel + 1;
+	unsigned int idr_cfg = synchronous_mode;
+
+	mutex_lock(&priv->lock);
+
+	idr_cfg |= priv->index_polarity[chan->channel] << 1;
 
 	/* Index function must be non-synchronous in non-quadrature mode */
-	if (synchronous_mode && !priv->quadrature_mode[chan->channel])
+	if (synchronous_mode && !priv->quadrature_mode[chan->channel]) {
+		mutex_unlock(&priv->lock);
 		return -EINVAL;
+	}
 
 	priv->synchronous_mode[chan->channel] = synchronous_mode;
 
	/* Load Index Control configuration to Index Control Register */
 	outb(QUAD8_CTR_IDR | idr_cfg, base_offset);
 
+	mutex_unlock(&priv->lock);
+
 	return 0;
 }
 
@@ -427,8 +468,12 @@ static int quad8_set_quadrature_mode(struct iio_dev *indio_dev,
 	const struct iio_chan_spec *chan, unsigned int quadrature_mode)
 {
 	struct quad8_iio *const priv = iio_priv(indio_dev);
-	unsigned int mode_cfg = priv->count_mode[chan->channel] << 1;
 	const int base_offset = priv->base + 2 * chan->channel + 1;
+	unsigned int mode_cfg;
+
+	mutex_lock(&priv->lock);
+
+	mode_cfg = priv->count_mode[chan->channel] << 1;
 
 	if (quadrature_mode)
 		mode_cfg |= (priv->quadrature_scale[chan->channel] + 1) << 3;
@@ -446,6 +491,8 @@
/* Load mode configuration to Counter Mode Register */ /* Load mode configuration to Counter Mode Register */
outb(QUAD8_CTR_CMR | mode_cfg, base_offset); outb(QUAD8_CTR_CMR | mode_cfg, base_offset);
mutex_unlock(&priv->lock);
return 0; return 0;
} }
@ -473,15 +520,20 @@ static int quad8_set_index_polarity(struct iio_dev *indio_dev,
const struct iio_chan_spec *chan, unsigned int index_polarity) const struct iio_chan_spec *chan, unsigned int index_polarity)
{ {
struct quad8_iio *const priv = iio_priv(indio_dev); struct quad8_iio *const priv = iio_priv(indio_dev);
const unsigned int idr_cfg = priv->synchronous_mode[chan->channel] |
index_polarity << 1;
const int base_offset = priv->base + 2 * chan->channel + 1; const int base_offset = priv->base + 2 * chan->channel + 1;
unsigned int idr_cfg = index_polarity << 1;
mutex_lock(&priv->lock);
idr_cfg |= priv->synchronous_mode[chan->channel];
priv->index_polarity[chan->channel] = index_polarity; priv->index_polarity[chan->channel] = index_polarity;
/* Load Index Control configuration to Index Control Register */ /* Load Index Control configuration to Index Control Register */
outb(QUAD8_CTR_IDR | idr_cfg, base_offset); outb(QUAD8_CTR_IDR | idr_cfg, base_offset);
mutex_unlock(&priv->lock);
return 0; return 0;
} }
@ -585,7 +637,7 @@ static int quad8_signal_read(struct counter_device *counter,
static int quad8_count_read(struct counter_device *counter, static int quad8_count_read(struct counter_device *counter,
struct counter_count *count, struct counter_count_read_value *val) struct counter_count *count, struct counter_count_read_value *val)
{ {
const struct quad8_iio *const priv = counter->priv; struct quad8_iio *const priv = counter->priv;
const int base_offset = priv->base + 2 * count->id; const int base_offset = priv->base + 2 * count->id;
unsigned int flags; unsigned int flags;
unsigned int borrow; unsigned int borrow;
@ -600,6 +652,8 @@ static int quad8_count_read(struct counter_device *counter,
/* Borrow XOR Carry effectively doubles count range */ /* Borrow XOR Carry effectively doubles count range */
position = (unsigned long)(borrow ^ carry) << 24; position = (unsigned long)(borrow ^ carry) << 24;
mutex_lock(&priv->lock);
/* Reset Byte Pointer; transfer Counter to Output Latch */ /* Reset Byte Pointer; transfer Counter to Output Latch */
outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_BP | QUAD8_RLD_CNTR_OUT, outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_BP | QUAD8_RLD_CNTR_OUT,
base_offset + 1); base_offset + 1);
@ -607,6 +661,8 @@ static int quad8_count_read(struct counter_device *counter,
for (i = 0; i < 3; i++) for (i = 0; i < 3; i++)
position |= (unsigned long)inb(base_offset) << (8 * i); position |= (unsigned long)inb(base_offset) << (8 * i);
mutex_unlock(&priv->lock);
counter_count_read_value_set(val, COUNTER_COUNT_POSITION, &position); counter_count_read_value_set(val, COUNTER_COUNT_POSITION, &position);
return 0; return 0;
@ -615,7 +671,7 @@ static int quad8_count_read(struct counter_device *counter,
static int quad8_count_write(struct counter_device *counter, static int quad8_count_write(struct counter_device *counter,
struct counter_count *count, struct counter_count_write_value *val) struct counter_count *count, struct counter_count_write_value *val)
{ {
const struct quad8_iio *const priv = counter->priv; struct quad8_iio *const priv = counter->priv;
const int base_offset = priv->base + 2 * count->id; const int base_offset = priv->base + 2 * count->id;
int err; int err;
unsigned long position; unsigned long position;
@ -630,6 +686,8 @@ static int quad8_count_write(struct counter_device *counter,
if (position > 0xFFFFFF) if (position > 0xFFFFFF)
return -EINVAL; return -EINVAL;
mutex_lock(&priv->lock);
/* Reset Byte Pointer */ /* Reset Byte Pointer */
outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_BP, base_offset + 1); outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_BP, base_offset + 1);
@ -653,6 +711,8 @@ static int quad8_count_write(struct counter_device *counter,
/* Reset Error flag */ /* Reset Error flag */
outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_E, base_offset + 1); outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_E, base_offset + 1);
mutex_unlock(&priv->lock);
return 0; return 0;
} }
@ -673,13 +733,13 @@ static enum counter_count_function quad8_count_functions_list[] = {
static int quad8_function_get(struct counter_device *counter, static int quad8_function_get(struct counter_device *counter,
struct counter_count *count, size_t *function) struct counter_count *count, size_t *function)
{ {
const struct quad8_iio *const priv = counter->priv; struct quad8_iio *const priv = counter->priv;
const int id = count->id; const int id = count->id;
const unsigned int quadrature_mode = priv->quadrature_mode[id];
const unsigned int scale = priv->quadrature_scale[id];
if (quadrature_mode) mutex_lock(&priv->lock);
switch (scale) {
if (priv->quadrature_mode[id])
switch (priv->quadrature_scale[id]) {
case 0: case 0:
*function = QUAD8_COUNT_FUNCTION_QUADRATURE_X1; *function = QUAD8_COUNT_FUNCTION_QUADRATURE_X1;
break; break;
@ -693,6 +753,8 @@ static int quad8_function_get(struct counter_device *counter,
else else
*function = QUAD8_COUNT_FUNCTION_PULSE_DIRECTION; *function = QUAD8_COUNT_FUNCTION_PULSE_DIRECTION;
mutex_unlock(&priv->lock);
return 0; return 0;
} }
@ -703,10 +765,15 @@ static int quad8_function_set(struct counter_device *counter,
const int id = count->id; const int id = count->id;
unsigned int *const quadrature_mode = priv->quadrature_mode + id; unsigned int *const quadrature_mode = priv->quadrature_mode + id;
unsigned int *const scale = priv->quadrature_scale + id; unsigned int *const scale = priv->quadrature_scale + id;
unsigned int mode_cfg = priv->count_mode[id] << 1;
unsigned int *const synchronous_mode = priv->synchronous_mode + id; unsigned int *const synchronous_mode = priv->synchronous_mode + id;
const unsigned int idr_cfg = priv->index_polarity[id] << 1;
const int base_offset = priv->base + 2 * id + 1; const int base_offset = priv->base + 2 * id + 1;
unsigned int mode_cfg;
unsigned int idr_cfg;
mutex_lock(&priv->lock);
mode_cfg = priv->count_mode[id] << 1;
idr_cfg = priv->index_polarity[id] << 1;
if (function == QUAD8_COUNT_FUNCTION_PULSE_DIRECTION) { if (function == QUAD8_COUNT_FUNCTION_PULSE_DIRECTION) {
*quadrature_mode = 0; *quadrature_mode = 0;
@ -742,6 +809,8 @@ static int quad8_function_set(struct counter_device *counter,
/* Load mode configuration to Counter Mode Register */ /* Load mode configuration to Counter Mode Register */
outb(QUAD8_CTR_CMR | mode_cfg, base_offset); outb(QUAD8_CTR_CMR | mode_cfg, base_offset);
mutex_unlock(&priv->lock);
return 0; return 0;
} }
@ -858,15 +927,20 @@ static int quad8_index_polarity_set(struct counter_device *counter,
{ {
struct quad8_iio *const priv = counter->priv; struct quad8_iio *const priv = counter->priv;
const size_t channel_id = signal->id - 16; const size_t channel_id = signal->id - 16;
const unsigned int idr_cfg = priv->synchronous_mode[channel_id] |
index_polarity << 1;
const int base_offset = priv->base + 2 * channel_id + 1; const int base_offset = priv->base + 2 * channel_id + 1;
unsigned int idr_cfg = index_polarity << 1;
mutex_lock(&priv->lock);
idr_cfg |= priv->synchronous_mode[channel_id];
priv->index_polarity[channel_id] = index_polarity; priv->index_polarity[channel_id] = index_polarity;
/* Load Index Control configuration to Index Control Register */ /* Load Index Control configuration to Index Control Register */
outb(QUAD8_CTR_IDR | idr_cfg, base_offset); outb(QUAD8_CTR_IDR | idr_cfg, base_offset);
mutex_unlock(&priv->lock);
return 0; return 0;
} }
@ -893,19 +967,26 @@ static int quad8_synchronous_mode_set(struct counter_device *counter,
{ {
struct quad8_iio *const priv = counter->priv; struct quad8_iio *const priv = counter->priv;
const size_t channel_id = signal->id - 16; const size_t channel_id = signal->id - 16;
const unsigned int idr_cfg = synchronous_mode |
priv->index_polarity[channel_id] << 1;
const int base_offset = priv->base + 2 * channel_id + 1; const int base_offset = priv->base + 2 * channel_id + 1;
unsigned int idr_cfg = synchronous_mode;
mutex_lock(&priv->lock);
idr_cfg |= priv->index_polarity[channel_id] << 1;
/* Index function must be non-synchronous in non-quadrature mode */ /* Index function must be non-synchronous in non-quadrature mode */
if (synchronous_mode && !priv->quadrature_mode[channel_id]) if (synchronous_mode && !priv->quadrature_mode[channel_id]) {
mutex_unlock(&priv->lock);
return -EINVAL; return -EINVAL;
}
priv->synchronous_mode[channel_id] = synchronous_mode; priv->synchronous_mode[channel_id] = synchronous_mode;
/* Load Index Control configuration to Index Control Register */ /* Load Index Control configuration to Index Control Register */
outb(QUAD8_CTR_IDR | idr_cfg, base_offset); outb(QUAD8_CTR_IDR | idr_cfg, base_offset);
mutex_unlock(&priv->lock);
return 0; return 0;
} }
@ -970,6 +1051,8 @@ static int quad8_count_mode_set(struct counter_device *counter,
break; break;
} }
mutex_lock(&priv->lock);
priv->count_mode[count->id] = cnt_mode; priv->count_mode[count->id] = cnt_mode;
/* Set count mode configuration value */ /* Set count mode configuration value */
@ -982,6 +1065,8 @@ static int quad8_count_mode_set(struct counter_device *counter,
/* Load mode configuration to Counter Mode Register */ /* Load mode configuration to Counter Mode Register */
outb(QUAD8_CTR_CMR | mode_cfg, base_offset); outb(QUAD8_CTR_CMR | mode_cfg, base_offset);
mutex_unlock(&priv->lock);
return 0; return 0;
} }
@ -1023,6 +1108,8 @@ static ssize_t quad8_count_enable_write(struct counter_device *counter,
if (err) if (err)
return err; return err;
mutex_lock(&priv->lock);
priv->ab_enable[count->id] = ab_enable; priv->ab_enable[count->id] = ab_enable;
ior_cfg = ab_enable | priv->preset_enable[count->id] << 1; ior_cfg = ab_enable | priv->preset_enable[count->id] << 1;
@ -1030,6 +1117,8 @@ static ssize_t quad8_count_enable_write(struct counter_device *counter,
/* Load I/O control configuration */ /* Load I/O control configuration */
outb(QUAD8_CTR_IOR | ior_cfg, base_offset + 1); outb(QUAD8_CTR_IOR | ior_cfg, base_offset + 1);
mutex_unlock(&priv->lock);
return len; return len;
} }
@ -1058,14 +1147,28 @@ static ssize_t quad8_count_preset_read(struct counter_device *counter,
return sprintf(buf, "%u\n", priv->preset[count->id]); return sprintf(buf, "%u\n", priv->preset[count->id]);
} }
static void quad8_preset_register_set(struct quad8_iio *quad8iio, int id,
unsigned int preset)
{
const unsigned int base_offset = quad8iio->base + 2 * id;
int i;
quad8iio->preset[id] = preset;
/* Reset Byte Pointer */
outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_BP, base_offset + 1);
/* Set Preset Register */
for (i = 0; i < 3; i++)
outb(preset >> (8 * i), base_offset);
}
static ssize_t quad8_count_preset_write(struct counter_device *counter, static ssize_t quad8_count_preset_write(struct counter_device *counter,
struct counter_count *count, void *private, const char *buf, size_t len) struct counter_count *count, void *private, const char *buf, size_t len)
{ {
struct quad8_iio *const priv = counter->priv; struct quad8_iio *const priv = counter->priv;
const int base_offset = priv->base + 2 * count->id;
unsigned int preset; unsigned int preset;
int ret; int ret;
int i;
ret = kstrtouint(buf, 0, &preset); ret = kstrtouint(buf, 0, &preset);
if (ret) if (ret)
@ -1075,14 +1178,11 @@ static ssize_t quad8_count_preset_write(struct counter_device *counter,
if (preset > 0xFFFFFF) if (preset > 0xFFFFFF)
return -EINVAL; return -EINVAL;
priv->preset[count->id] = preset; mutex_lock(&priv->lock);
/* Reset Byte Pointer */ quad8_preset_register_set(priv, count->id, preset);
outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_BP, base_offset + 1);
/* Set Preset Register */ mutex_unlock(&priv->lock);
for (i = 0; i < 3; i++)
outb(preset >> (8 * i), base_offset);
return len; return len;
} }
@ -1090,15 +1190,20 @@ static ssize_t quad8_count_preset_write(struct counter_device *counter,
static ssize_t quad8_count_ceiling_read(struct counter_device *counter, static ssize_t quad8_count_ceiling_read(struct counter_device *counter,
struct counter_count *count, void *private, char *buf) struct counter_count *count, void *private, char *buf)
{ {
const struct quad8_iio *const priv = counter->priv; struct quad8_iio *const priv = counter->priv;
mutex_lock(&priv->lock);
/* Range Limit and Modulo-N count modes use preset value as ceiling */ /* Range Limit and Modulo-N count modes use preset value as ceiling */
switch (priv->count_mode[count->id]) { switch (priv->count_mode[count->id]) {
case 1: case 1:
case 3: case 3:
return quad8_count_preset_read(counter, count, private, buf); mutex_unlock(&priv->lock);
return sprintf(buf, "%u\n", priv->preset[count->id]);
} }
mutex_unlock(&priv->lock);
/* By default 0x1FFFFFF (25 bits unsigned) is maximum count */ /* By default 0x1FFFFFF (25 bits unsigned) is maximum count */
return sprintf(buf, "33554431\n"); return sprintf(buf, "33554431\n");
} }
@ -1107,15 +1212,29 @@ static ssize_t quad8_count_ceiling_write(struct counter_device *counter,
struct counter_count *count, void *private, const char *buf, size_t len) struct counter_count *count, void *private, const char *buf, size_t len)
{ {
struct quad8_iio *const priv = counter->priv; struct quad8_iio *const priv = counter->priv;
unsigned int ceiling;
int ret;
ret = kstrtouint(buf, 0, &ceiling);
if (ret)
return ret;
/* Only 24-bit values are supported */
if (ceiling > 0xFFFFFF)
return -EINVAL;
mutex_lock(&priv->lock);
/* Range Limit and Modulo-N count modes use preset value as ceiling */ /* Range Limit and Modulo-N count modes use preset value as ceiling */
switch (priv->count_mode[count->id]) { switch (priv->count_mode[count->id]) {
case 1: case 1:
case 3: case 3:
return quad8_count_preset_write(counter, count, private, buf, quad8_preset_register_set(priv, count->id, ceiling);
len); break;
} }
mutex_unlock(&priv->lock);
return len; return len;
} }
@ -1143,6 +1262,8 @@ static ssize_t quad8_count_preset_enable_write(struct counter_device *counter,
/* Preset enable is active low in Input/Output Control register */ /* Preset enable is active low in Input/Output Control register */
preset_enable = !preset_enable; preset_enable = !preset_enable;
mutex_lock(&priv->lock);
priv->preset_enable[count->id] = preset_enable; priv->preset_enable[count->id] = preset_enable;
ior_cfg = priv->ab_enable[count->id] | (unsigned int)preset_enable << 1; ior_cfg = priv->ab_enable[count->id] | (unsigned int)preset_enable << 1;
@ -1150,6 +1271,8 @@ static ssize_t quad8_count_preset_enable_write(struct counter_device *counter,
/* Load I/O control configuration to Input / Output Control Register */ /* Load I/O control configuration to Input / Output Control Register */
outb(QUAD8_CTR_IOR | ior_cfg, base_offset); outb(QUAD8_CTR_IOR | ior_cfg, base_offset);
mutex_unlock(&priv->lock);
return len; return len;
} }
@ -1320,6 +1443,9 @@ static int quad8_probe(struct device *dev, unsigned int id)
quad8iio->counter.priv = quad8iio; quad8iio->counter.priv = quad8iio;
quad8iio->base = base[id]; quad8iio->base = base[id];
/* Initialize mutex */
mutex_init(&quad8iio->lock);
/* Reset all counters and disable interrupt function */ /* Reset all counters and disable interrupt function */
outb(QUAD8_CHAN_OP_RESET_COUNTERS, base[id] + QUAD8_REG_CHAN_OP); outb(QUAD8_CHAN_OP_RESET_COUNTERS, base[id] + QUAD8_REG_CHAN_OP);
/* Set initial configuration for all counters */ /* Set initial configuration for all counters */
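The pattern repeated throughout the 104-QUAD-8 hunks above is that every multi-step register sequence (reset the byte pointer, then read or write three bytes) now runs under priv->lock, since the device exposes its 24-bit registers through a shared auto-incrementing byte pointer and interleaved callers would corrupt each other's transfers. A minimal sketch of the resulting idiom, assuming the quad8_iio fields shown above (the helper name is mine, not the driver's):

static unsigned int quad8_read_counter_locked(struct quad8_iio *priv, int id)
{
	const int base_offset = priv->base + 2 * id;
	unsigned int val = 0;
	int i;

	mutex_lock(&priv->lock);
	/* Reset Byte Pointer so the reads start at the low byte */
	outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_BP, base_offset + 1);
	/* Assemble the 24-bit count, one byte per inb() */
	for (i = 0; i < 3; i++)
		val |= (unsigned int)inb(base_offset) << (8 * i);
	mutex_unlock(&priv->lock);

	return val;
}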


@ -125,8 +125,6 @@ static void chcr_dev_init(struct uld_ctx *u_ctx)
atomic_set(&dev->inflight, 0); atomic_set(&dev->inflight, 0);
mutex_lock(&drv_data.drv_mutex); mutex_lock(&drv_data.drv_mutex);
list_add_tail(&u_ctx->entry, &drv_data.inact_dev); list_add_tail(&u_ctx->entry, &drv_data.inact_dev);
if (!drv_data.last_dev)
drv_data.last_dev = u_ctx;
mutex_unlock(&drv_data.drv_mutex); mutex_unlock(&drv_data.drv_mutex);
} }


@ -902,7 +902,9 @@ int devfreq_suspend_device(struct devfreq *devfreq)
} }
if (devfreq->suspend_freq) { if (devfreq->suspend_freq) {
mutex_lock(&devfreq->lock);
ret = devfreq_set_target(devfreq, devfreq->suspend_freq, 0); ret = devfreq_set_target(devfreq, devfreq->suspend_freq, 0);
mutex_unlock(&devfreq->lock);
if (ret) if (ret)
return ret; return ret;
} }
@ -930,7 +932,9 @@ int devfreq_resume_device(struct devfreq *devfreq)
return 0; return 0;
if (devfreq->resume_freq) { if (devfreq->resume_freq) {
mutex_lock(&devfreq->lock);
ret = devfreq_set_target(devfreq, devfreq->resume_freq, 0); ret = devfreq_set_target(devfreq, devfreq->resume_freq, 0);
mutex_unlock(&devfreq->lock);
if (ret) if (ret)
return ret; return ret;
} }


@ -235,7 +235,7 @@ static bool is_threaded_test_run(struct dmatest_info *info)
struct dmatest_thread *thread; struct dmatest_thread *thread;
list_for_each_entry(thread, &dtc->threads, node) { list_for_each_entry(thread, &dtc->threads, node) {
if (!thread->done) if (!thread->done && !thread->pending)
return true; return true;
} }
} }
@ -654,8 +654,8 @@ static int dmatest_func(void *data)
flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT; flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
ktime = ktime_get(); ktime = ktime_get();
while (!kthread_should_stop() while (!(kthread_should_stop() ||
&& !(params->iterations && total_tests >= params->iterations)) { (params->iterations && total_tests >= params->iterations))) {
struct dma_async_tx_descriptor *tx = NULL; struct dma_async_tx_descriptor *tx = NULL;
struct dmaengine_unmap_data *um; struct dmaengine_unmap_data *um;
dma_addr_t *dsts; dma_addr_t *dsts;


@ -75,7 +75,8 @@ void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev)
adev->pm.ac_power = true; adev->pm.ac_power = true;
else else
adev->pm.ac_power = false; adev->pm.ac_power = false;
if (adev->powerplay.pp_funcs->enable_bapm) if (adev->powerplay.pp_funcs &&
adev->powerplay.pp_funcs->enable_bapm)
amdgpu_dpm_enable_bapm(adev, adev->pm.ac_power); amdgpu_dpm_enable_bapm(adev, adev->pm.ac_power);
mutex_unlock(&adev->pm.mutex); mutex_unlock(&adev->pm.mutex);
} }


@ -2511,7 +2511,8 @@ fill_plane_dcc_attributes(struct amdgpu_device *adev,
const union dc_tiling_info *tiling_info, const union dc_tiling_info *tiling_info,
const uint64_t info, const uint64_t info,
struct dc_plane_dcc_param *dcc, struct dc_plane_dcc_param *dcc,
struct dc_plane_address *address) struct dc_plane_address *address,
bool force_disable_dcc)
{ {
struct dc *dc = adev->dm.dc; struct dc *dc = adev->dm.dc;
struct dc_dcc_surface_param input; struct dc_dcc_surface_param input;
@ -2523,6 +2524,9 @@ fill_plane_dcc_attributes(struct amdgpu_device *adev,
memset(&input, 0, sizeof(input)); memset(&input, 0, sizeof(input));
memset(&output, 0, sizeof(output)); memset(&output, 0, sizeof(output));
if (force_disable_dcc)
return 0;
if (!offset) if (!offset)
return 0; return 0;
@ -2572,7 +2576,8 @@ fill_plane_buffer_attributes(struct amdgpu_device *adev,
union dc_tiling_info *tiling_info, union dc_tiling_info *tiling_info,
union plane_size *plane_size, union plane_size *plane_size,
struct dc_plane_dcc_param *dcc, struct dc_plane_dcc_param *dcc,
struct dc_plane_address *address) struct dc_plane_address *address,
bool force_disable_dcc)
{ {
const struct drm_framebuffer *fb = &afb->base; const struct drm_framebuffer *fb = &afb->base;
int ret; int ret;
@ -2674,7 +2679,8 @@ fill_plane_buffer_attributes(struct amdgpu_device *adev,
ret = fill_plane_dcc_attributes(adev, afb, format, rotation, ret = fill_plane_dcc_attributes(adev, afb, format, rotation,
plane_size, tiling_info, plane_size, tiling_info,
tiling_flags, dcc, address); tiling_flags, dcc, address,
force_disable_dcc);
if (ret) if (ret)
return ret; return ret;
} }
@ -2766,7 +2772,8 @@ fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
const struct drm_plane_state *plane_state, const struct drm_plane_state *plane_state,
const uint64_t tiling_flags, const uint64_t tiling_flags,
struct dc_plane_info *plane_info, struct dc_plane_info *plane_info,
struct dc_plane_address *address) struct dc_plane_address *address,
bool force_disable_dcc)
{ {
const struct drm_framebuffer *fb = plane_state->fb; const struct drm_framebuffer *fb = plane_state->fb;
const struct amdgpu_framebuffer *afb = const struct amdgpu_framebuffer *afb =
@ -2843,7 +2850,8 @@ fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
plane_info->rotation, tiling_flags, plane_info->rotation, tiling_flags,
&plane_info->tiling_info, &plane_info->tiling_info,
&plane_info->plane_size, &plane_info->plane_size,
&plane_info->dcc, address); &plane_info->dcc, address,
force_disable_dcc);
if (ret) if (ret)
return ret; return ret;
@ -2865,6 +2873,7 @@ static int fill_dc_plane_attributes(struct amdgpu_device *adev,
struct dc_plane_info plane_info; struct dc_plane_info plane_info;
uint64_t tiling_flags; uint64_t tiling_flags;
int ret; int ret;
bool force_disable_dcc = false;
ret = fill_dc_scaling_info(plane_state, &scaling_info); ret = fill_dc_scaling_info(plane_state, &scaling_info);
if (ret) if (ret)
@ -2879,9 +2888,11 @@ static int fill_dc_plane_attributes(struct amdgpu_device *adev,
if (ret) if (ret)
return ret; return ret;
force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
ret = fill_dc_plane_info_and_addr(adev, plane_state, tiling_flags, ret = fill_dc_plane_info_and_addr(adev, plane_state, tiling_flags,
&plane_info, &plane_info,
&dc_plane_state->address); &dc_plane_state->address,
force_disable_dcc);
if (ret) if (ret)
return ret; return ret;
@ -4103,6 +4114,7 @@ static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
uint64_t tiling_flags; uint64_t tiling_flags;
uint32_t domain; uint32_t domain;
int r; int r;
bool force_disable_dcc = false;
dm_plane_state_old = to_dm_plane_state(plane->state); dm_plane_state_old = to_dm_plane_state(plane->state);
dm_plane_state_new = to_dm_plane_state(new_state); dm_plane_state_new = to_dm_plane_state(new_state);
@ -4153,11 +4165,13 @@ static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) { dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
struct dc_plane_state *plane_state = dm_plane_state_new->dc_state; struct dc_plane_state *plane_state = dm_plane_state_new->dc_state;
force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
fill_plane_buffer_attributes( fill_plane_buffer_attributes(
adev, afb, plane_state->format, plane_state->rotation, adev, afb, plane_state->format, plane_state->rotation,
tiling_flags, &plane_state->tiling_info, tiling_flags, &plane_state->tiling_info,
&plane_state->plane_size, &plane_state->dcc, &plane_state->plane_size, &plane_state->dcc,
&plane_state->address); &plane_state->address,
force_disable_dcc);
} }
return 0; return 0;
@ -5363,7 +5377,12 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
fill_dc_plane_info_and_addr( fill_dc_plane_info_and_addr(
dm->adev, new_plane_state, tiling_flags, dm->adev, new_plane_state, tiling_flags,
&bundle->plane_infos[planes_count], &bundle->plane_infos[planes_count],
&bundle->flip_addrs[planes_count].address); &bundle->flip_addrs[planes_count].address,
false);
DRM_DEBUG_DRIVER("plane: id=%d dcc_en=%d\n",
new_plane_state->plane->index,
bundle->plane_infos[planes_count].dcc.enable);
bundle->surface_updates[planes_count].plane_info = bundle->surface_updates[planes_count].plane_info =
&bundle->plane_infos[planes_count]; &bundle->plane_infos[planes_count];
@ -6553,6 +6572,12 @@ dm_determine_update_type_for_commit(struct amdgpu_display_manager *dm,
continue; continue;
for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, j) { for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, j) {
const struct amdgpu_framebuffer *amdgpu_fb =
to_amdgpu_framebuffer(new_plane_state->fb);
struct dc_plane_info plane_info;
struct dc_flip_addrs flip_addr;
uint64_t tiling_flags;
new_plane_crtc = new_plane_state->crtc; new_plane_crtc = new_plane_state->crtc;
old_plane_crtc = old_plane_state->crtc; old_plane_crtc = old_plane_state->crtc;
new_dm_plane_state = to_dm_plane_state(new_plane_state); new_dm_plane_state = to_dm_plane_state(new_plane_state);
@ -6594,6 +6619,25 @@ dm_determine_update_type_for_commit(struct amdgpu_display_manager *dm,
updates[num_plane].scaling_info = &scaling_info; updates[num_plane].scaling_info = &scaling_info;
if (amdgpu_fb) {
ret = get_fb_info(amdgpu_fb, &tiling_flags);
if (ret)
goto cleanup;
memset(&flip_addr, 0, sizeof(flip_addr));
ret = fill_dc_plane_info_and_addr(
dm->adev, new_plane_state, tiling_flags,
&plane_info,
&flip_addr.address,
false);
if (ret)
goto cleanup;
updates[num_plane].plane_info = &plane_info;
updates[num_plane].flip_addr = &flip_addr;
}
num_plane++; num_plane++;
} }


@ -254,6 +254,8 @@ bool dc_stream_adjust_vmin_vmax(struct dc *dc,
int i = 0; int i = 0;
bool ret = false; bool ret = false;
stream->adjust = *adjust;
for (i = 0; i < MAX_PIPES; i++) { for (i = 0; i < MAX_PIPES; i++) {
struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i]; struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
@ -1216,6 +1218,26 @@ bool dc_commit_state(struct dc *dc, struct dc_state *context)
return (result == DC_OK); return (result == DC_OK);
} }
static bool is_flip_pending_in_pipes(struct dc *dc, struct dc_state *context)
{
int i;
struct pipe_ctx *pipe;
for (i = 0; i < MAX_PIPES; i++) {
pipe = &context->res_ctx.pipe_ctx[i];
if (!pipe->plane_state)
continue;
/* Must start as false, since the update function ORs into this flag */
pipe->plane_state->status.is_flip_pending = false;
dc->hwss.update_pending_status(pipe);
if (pipe->plane_state->status.is_flip_pending)
return true;
}
return false;
}
bool dc_post_update_surfaces_to_stream(struct dc *dc) bool dc_post_update_surfaces_to_stream(struct dc *dc)
{ {
int i; int i;
@ -1226,6 +1248,9 @@ bool dc_post_update_surfaces_to_stream(struct dc *dc)
post_surface_trace(dc); post_surface_trace(dc);
if (is_flip_pending_in_pipes(dc, context))
return true;
for (i = 0; i < dc->res_pool->pipe_count; i++) for (i = 0; i < dc->res_pool->pipe_count; i++)
if (context->res_ctx.pipe_ctx[i].stream == NULL || if (context->res_ctx.pipe_ctx[i].stream == NULL ||
context->res_ctx.pipe_ctx[i].plane_state == NULL) { context->res_ctx.pipe_ctx[i].plane_state == NULL) {


@ -984,6 +984,32 @@ static int init_thermal_controller(
struct pp_hwmgr *hwmgr, struct pp_hwmgr *hwmgr,
const ATOM_PPLIB_POWERPLAYTABLE *powerplay_table) const ATOM_PPLIB_POWERPLAYTABLE *powerplay_table)
{ {
hwmgr->thermal_controller.ucType =
powerplay_table->sThermalController.ucType;
hwmgr->thermal_controller.ucI2cLine =
powerplay_table->sThermalController.ucI2cLine;
hwmgr->thermal_controller.ucI2cAddress =
powerplay_table->sThermalController.ucI2cAddress;
hwmgr->thermal_controller.fanInfo.bNoFan =
(0 != (powerplay_table->sThermalController.ucFanParameters &
ATOM_PP_FANPARAMETERS_NOFAN));
hwmgr->thermal_controller.fanInfo.ucTachometerPulsesPerRevolution =
powerplay_table->sThermalController.ucFanParameters &
ATOM_PP_FANPARAMETERS_TACHOMETER_PULSES_PER_REVOLUTION_MASK;
hwmgr->thermal_controller.fanInfo.ulMinRPM
= powerplay_table->sThermalController.ucFanMinRPM * 100UL;
hwmgr->thermal_controller.fanInfo.ulMaxRPM
= powerplay_table->sThermalController.ucFanMaxRPM * 100UL;
set_hw_cap(hwmgr,
ATOM_PP_THERMALCONTROLLER_NONE != hwmgr->thermal_controller.ucType,
PHM_PlatformCaps_ThermalController);
hwmgr->thermal_controller.use_hw_fan_control = 1;
return 0; return 0;
} }


@ -4688,7 +4688,7 @@ static struct drm_display_mode *drm_mode_displayid_detailed(struct drm_device *d
struct drm_display_mode *mode; struct drm_display_mode *mode;
unsigned pixel_clock = (timings->pixel_clock[0] | unsigned pixel_clock = (timings->pixel_clock[0] |
(timings->pixel_clock[1] << 8) | (timings->pixel_clock[1] << 8) |
(timings->pixel_clock[2] << 16)); (timings->pixel_clock[2] << 16)) + 1;
unsigned hactive = (timings->hactive[0] | timings->hactive[1] << 8) + 1; unsigned hactive = (timings->hactive[0] | timings->hactive[1] << 8) + 1;
unsigned hblank = (timings->hblank[0] | timings->hblank[1] << 8) + 1; unsigned hblank = (timings->hblank[0] | timings->hblank[1] << 8) + 1;
unsigned hsync = (timings->hsync[0] | (timings->hsync[1] & 0x7f) << 8) + 1; unsigned hsync = (timings->hsync[0] | (timings->hsync[1] & 0x7f) << 8) + 1;
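The added +1 matters because, as I read the DisplayID detailed-timing layout, the 3-byte field stores (pixel clock in 10 kHz units) minus 1, the same value-minus-one encoding that the surrounding hactive/hblank/hsync decodes already undo. A hedged sketch of the decode (helper name and example values are mine):

static unsigned int dispid_dtd_clock_khz(const unsigned char pc[3])
{
	unsigned int raw = pc[0] | (pc[1] << 8) | (pc[2] << 16);

	/* e.g. raw 14849 (0x3A01) -> (14849 + 1) * 10 = 148500 kHz = 148.5 MHz */
	return (raw + 1) * 10;
}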


@ -16333,8 +16333,11 @@ get_encoder_power_domains(struct drm_i915_private *dev_priv)
static void intel_early_display_was(struct drm_i915_private *dev_priv) static void intel_early_display_was(struct drm_i915_private *dev_priv)
{ {
/* Display WA #1185 WaDisableDARBFClkGating:cnl,glk */ /*
if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv)) * Display WA #1185 WaDisableDARBFClkGating:cnl,glk,icl,ehl,tgl
* Also known as Wa_14010480278.
*/
if (IS_GEN_RANGE(dev_priv, 10, 12) || IS_GEMINILAKE(dev_priv))
I915_WRITE(GEN9_CLKGATE_DIS_0, I915_READ(GEN9_CLKGATE_DIS_0) | I915_WRITE(GEN9_CLKGATE_DIS_0, I915_READ(GEN9_CLKGATE_DIS_0) |
DARBF_GATING_DIS); DARBF_GATING_DIS);


@ -478,9 +478,10 @@ int qxl_hw_surface_alloc(struct qxl_device *qdev,
return ret; return ret;
ret = qxl_release_reserve_list(release, true); ret = qxl_release_reserve_list(release, true);
if (ret) if (ret) {
qxl_release_free(qdev, release);
return ret; return ret;
}
cmd = (struct qxl_surface_cmd *)qxl_release_map(qdev, release); cmd = (struct qxl_surface_cmd *)qxl_release_map(qdev, release);
cmd->type = QXL_SURFACE_CMD_CREATE; cmd->type = QXL_SURFACE_CMD_CREATE;
cmd->flags = QXL_SURF_FLAG_KEEP_DATA; cmd->flags = QXL_SURF_FLAG_KEEP_DATA;
@ -497,8 +498,8 @@ int qxl_hw_surface_alloc(struct qxl_device *qdev,
/* no need to add a release to the fence for this surface bo, /* no need to add a release to the fence for this surface bo,
since it is only released when we ask to destroy the surface since it is only released when we ask to destroy the surface
and it would never signal otherwise */ and it would never signal otherwise */
qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false);
qxl_release_fence_buffer_objects(release); qxl_release_fence_buffer_objects(release);
qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false);
surf->hw_surf_alloc = true; surf->hw_surf_alloc = true;
spin_lock(&qdev->surf_id_idr_lock); spin_lock(&qdev->surf_id_idr_lock);
@ -540,9 +541,8 @@ int qxl_hw_surface_dealloc(struct qxl_device *qdev,
cmd->surface_id = id; cmd->surface_id = id;
qxl_release_unmap(qdev, release, &cmd->release_info); qxl_release_unmap(qdev, release, &cmd->release_info);
qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false);
qxl_release_fence_buffer_objects(release); qxl_release_fence_buffer_objects(release);
qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false);
return 0; return 0;
} }


@ -520,8 +520,8 @@ static int qxl_primary_apply_cursor(struct drm_plane *plane)
cmd->u.set.visible = 1; cmd->u.set.visible = 1;
qxl_release_unmap(qdev, release, &cmd->release_info); qxl_release_unmap(qdev, release, &cmd->release_info);
qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
qxl_release_fence_buffer_objects(release); qxl_release_fence_buffer_objects(release);
qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
return ret; return ret;
@ -662,8 +662,8 @@ static void qxl_cursor_atomic_update(struct drm_plane *plane,
cmd->u.position.y = plane->state->crtc_y + fb->hot_y; cmd->u.position.y = plane->state->crtc_y + fb->hot_y;
qxl_release_unmap(qdev, release, &cmd->release_info); qxl_release_unmap(qdev, release, &cmd->release_info);
qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
qxl_release_fence_buffer_objects(release); qxl_release_fence_buffer_objects(release);
qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
if (old_cursor_bo != NULL) if (old_cursor_bo != NULL)
qxl_bo_unpin(old_cursor_bo); qxl_bo_unpin(old_cursor_bo);
@ -710,8 +710,8 @@ static void qxl_cursor_atomic_disable(struct drm_plane *plane,
cmd->type = QXL_CURSOR_HIDE; cmd->type = QXL_CURSOR_HIDE;
qxl_release_unmap(qdev, release, &cmd->release_info); qxl_release_unmap(qdev, release, &cmd->release_info);
qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
qxl_release_fence_buffer_objects(release); qxl_release_fence_buffer_objects(release);
qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
} }
static void qxl_update_dumb_head(struct qxl_device *qdev, static void qxl_update_dumb_head(struct qxl_device *qdev,


@ -207,9 +207,10 @@ void qxl_draw_dirty_fb(struct qxl_device *qdev,
goto out_release_backoff; goto out_release_backoff;
rects = drawable_set_clipping(qdev, num_clips, clips_bo); rects = drawable_set_clipping(qdev, num_clips, clips_bo);
if (!rects) if (!rects) {
ret = -EINVAL;
goto out_release_backoff; goto out_release_backoff;
}
drawable = (struct qxl_drawable *)qxl_release_map(qdev, release); drawable = (struct qxl_drawable *)qxl_release_map(qdev, release);
drawable->clip.type = SPICE_CLIP_TYPE_RECTS; drawable->clip.type = SPICE_CLIP_TYPE_RECTS;
@ -240,8 +241,8 @@ void qxl_draw_dirty_fb(struct qxl_device *qdev,
} }
qxl_bo_kunmap(clips_bo); qxl_bo_kunmap(clips_bo);
qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false);
qxl_release_fence_buffer_objects(release); qxl_release_fence_buffer_objects(release);
qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false);
out_release_backoff: out_release_backoff:
if (ret) if (ret)


@ -258,11 +258,8 @@ static int qxl_process_single_command(struct qxl_device *qdev,
apply_surf_reloc(qdev, &reloc_info[i]); apply_surf_reloc(qdev, &reloc_info[i]);
} }
qxl_release_fence_buffer_objects(release);
ret = qxl_push_command_ring_release(qdev, release, cmd->type, true); ret = qxl_push_command_ring_release(qdev, release, cmd->type, true);
if (ret)
qxl_release_backoff_reserve_list(release);
else
qxl_release_fence_buffer_objects(release);
out_free_bos: out_free_bos:
out_free_release: out_free_release:
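All four qxl hunks above enforce the same ordering rule: fence the release's buffer objects before pushing the command, because once the command is on the ring the host may process, and free, the release at any time. A hedged sketch of that rule as a helper (the wrapper is mine, not the driver's):

static void qxl_submit_release(struct qxl_device *qdev,
			       struct qxl_release *release, uint32_t type)
{
	/* Fence first: after the push, the host side may retire the release */
	qxl_release_fence_buffer_objects(release);
	qxl_push_command_ring_release(qdev, release, type, false);
}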


@ -682,16 +682,21 @@ static int usbhid_open(struct hid_device *hid)
struct usbhid_device *usbhid = hid->driver_data; struct usbhid_device *usbhid = hid->driver_data;
int res; int res;
mutex_lock(&usbhid->mutex);
set_bit(HID_OPENED, &usbhid->iofl); set_bit(HID_OPENED, &usbhid->iofl);
if (hid->quirks & HID_QUIRK_ALWAYS_POLL) if (hid->quirks & HID_QUIRK_ALWAYS_POLL) {
return 0; res = 0;
goto Done;
}
res = usb_autopm_get_interface(usbhid->intf); res = usb_autopm_get_interface(usbhid->intf);
/* the device must be awake to reliably request remote wakeup */ /* the device must be awake to reliably request remote wakeup */
if (res < 0) { if (res < 0) {
clear_bit(HID_OPENED, &usbhid->iofl); clear_bit(HID_OPENED, &usbhid->iofl);
return -EIO; res = -EIO;
goto Done;
} }
usbhid->intf->needs_remote_wakeup = 1; usbhid->intf->needs_remote_wakeup = 1;
@ -725,6 +730,9 @@ static int usbhid_open(struct hid_device *hid)
msleep(50); msleep(50);
clear_bit(HID_RESUME_RUNNING, &usbhid->iofl); clear_bit(HID_RESUME_RUNNING, &usbhid->iofl);
Done:
mutex_unlock(&usbhid->mutex);
return res; return res;
} }
@ -732,6 +740,8 @@ static void usbhid_close(struct hid_device *hid)
{ {
struct usbhid_device *usbhid = hid->driver_data; struct usbhid_device *usbhid = hid->driver_data;
mutex_lock(&usbhid->mutex);
/* /*
* Make sure we don't restart data acquisition due to * Make sure we don't restart data acquisition due to
* a resumption we no longer care about by avoiding racing * a resumption we no longer care about by avoiding racing
@ -743,12 +753,13 @@ static void usbhid_close(struct hid_device *hid)
clear_bit(HID_IN_POLLING, &usbhid->iofl); clear_bit(HID_IN_POLLING, &usbhid->iofl);
spin_unlock_irq(&usbhid->lock); spin_unlock_irq(&usbhid->lock);
if (hid->quirks & HID_QUIRK_ALWAYS_POLL) if (!(hid->quirks & HID_QUIRK_ALWAYS_POLL)) {
return; hid_cancel_delayed_stuff(usbhid);
usb_kill_urb(usbhid->urbin);
usbhid->intf->needs_remote_wakeup = 0;
}
hid_cancel_delayed_stuff(usbhid); mutex_unlock(&usbhid->mutex);
usb_kill_urb(usbhid->urbin);
usbhid->intf->needs_remote_wakeup = 0;
} }
/* /*
@ -1057,6 +1068,8 @@ static int usbhid_start(struct hid_device *hid)
unsigned int n, insize = 0; unsigned int n, insize = 0;
int ret; int ret;
mutex_lock(&usbhid->mutex);
clear_bit(HID_DISCONNECTED, &usbhid->iofl); clear_bit(HID_DISCONNECTED, &usbhid->iofl);
usbhid->bufsize = HID_MIN_BUFFER_SIZE; usbhid->bufsize = HID_MIN_BUFFER_SIZE;
@ -1177,6 +1190,8 @@ static int usbhid_start(struct hid_device *hid)
usbhid_set_leds(hid); usbhid_set_leds(hid);
device_set_wakeup_enable(&dev->dev, 1); device_set_wakeup_enable(&dev->dev, 1);
} }
mutex_unlock(&usbhid->mutex);
return 0; return 0;
fail: fail:
@ -1187,6 +1202,7 @@ fail:
usbhid->urbout = NULL; usbhid->urbout = NULL;
usbhid->urbctrl = NULL; usbhid->urbctrl = NULL;
hid_free_buffers(dev, hid); hid_free_buffers(dev, hid);
mutex_unlock(&usbhid->mutex);
return ret; return ret;
} }
@ -1202,6 +1218,8 @@ static void usbhid_stop(struct hid_device *hid)
usbhid->intf->needs_remote_wakeup = 0; usbhid->intf->needs_remote_wakeup = 0;
} }
mutex_lock(&usbhid->mutex);
clear_bit(HID_STARTED, &usbhid->iofl); clear_bit(HID_STARTED, &usbhid->iofl);
spin_lock_irq(&usbhid->lock); /* Sync with error and led handlers */ spin_lock_irq(&usbhid->lock); /* Sync with error and led handlers */
set_bit(HID_DISCONNECTED, &usbhid->iofl); set_bit(HID_DISCONNECTED, &usbhid->iofl);
@ -1222,6 +1240,8 @@ static void usbhid_stop(struct hid_device *hid)
usbhid->urbout = NULL; usbhid->urbout = NULL;
hid_free_buffers(hid_to_usb_dev(hid), hid); hid_free_buffers(hid_to_usb_dev(hid), hid);
mutex_unlock(&usbhid->mutex);
} }
static int usbhid_power(struct hid_device *hid, int lvl) static int usbhid_power(struct hid_device *hid, int lvl)
@ -1382,6 +1402,7 @@ static int usbhid_probe(struct usb_interface *intf, const struct usb_device_id *
INIT_WORK(&usbhid->reset_work, hid_reset); INIT_WORK(&usbhid->reset_work, hid_reset);
timer_setup(&usbhid->io_retry, hid_retry_timeout, 0); timer_setup(&usbhid->io_retry, hid_retry_timeout, 0);
spin_lock_init(&usbhid->lock); spin_lock_init(&usbhid->lock);
mutex_init(&usbhid->mutex);
ret = hid_add_device(hid); ret = hid_add_device(hid);
if (ret) { if (ret) {


@ -80,6 +80,7 @@ struct usbhid_device {
dma_addr_t outbuf_dma; /* Output buffer dma */ dma_addr_t outbuf_dma; /* Output buffer dma */
unsigned long last_out; /* record of last output for timeouts */ unsigned long last_out; /* record of last output for timeouts */
struct mutex mutex; /* start/stop/open/close */
spinlock_t lock; /* fifo spinlock */ spinlock_t lock; /* fifo spinlock */
unsigned long iofl; /* I/O flags (CTRL_RUNNING, OUT_RUNNING) */ unsigned long iofl; /* I/O flags (CTRL_RUNNING, OUT_RUNNING) */
struct timer_list io_retry; /* Retry timer */ struct timer_list io_retry; /* Retry timer */
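Taken together with the usbhid.c hunks, the new mutex serializes start/stop/open/close against each other, while the existing spinlock keeps protecting the I/O state that is touched from IRQ context. My reading of the resulting nesting (mutex outer, spinlock inner, never the reverse), sketched on the stop path:

static void usbhid_stop_locking_sketch(struct usbhid_device *usbhid)
{
	mutex_lock(&usbhid->mutex);	/* serialize against start/open/close */
	spin_lock_irq(&usbhid->lock);	/* sync with error and led handlers */
	set_bit(HID_DISCONNECTED, &usbhid->iofl);
	spin_unlock_irq(&usbhid->lock);
	mutex_unlock(&usbhid->mutex);
}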


@ -506,7 +506,7 @@ static int jc42_probe(struct i2c_client *client, const struct i2c_device_id *id)
} }
data->config = config; data->config = config;
hwmon_dev = devm_hwmon_device_register_with_info(dev, client->name, hwmon_dev = devm_hwmon_device_register_with_info(dev, "jc42",
data, &jc42_chip_info, data, &jc42_chip_info,
NULL); NULL);
return PTR_ERR_OR_ZERO(hwmon_dev); return PTR_ERR_OR_ZERO(hwmon_dev);


@ -384,7 +384,6 @@ static int altr_i2c_probe(struct platform_device *pdev)
struct altr_i2c_dev *idev = NULL; struct altr_i2c_dev *idev = NULL;
struct resource *res; struct resource *res;
int irq, ret; int irq, ret;
u32 val;
idev = devm_kzalloc(&pdev->dev, sizeof(*idev), GFP_KERNEL); idev = devm_kzalloc(&pdev->dev, sizeof(*idev), GFP_KERNEL);
if (!idev) if (!idev)
@ -411,17 +410,17 @@ static int altr_i2c_probe(struct platform_device *pdev)
init_completion(&idev->msg_complete); init_completion(&idev->msg_complete);
spin_lock_init(&idev->lock); spin_lock_init(&idev->lock);
val = device_property_read_u32(idev->dev, "fifo-size", ret = device_property_read_u32(idev->dev, "fifo-size",
&idev->fifo_size); &idev->fifo_size);
if (val) { if (ret) {
dev_err(&pdev->dev, "FIFO size set to default of %d\n", dev_err(&pdev->dev, "FIFO size set to default of %d\n",
ALTR_I2C_DFLT_FIFO_SZ); ALTR_I2C_DFLT_FIFO_SZ);
idev->fifo_size = ALTR_I2C_DFLT_FIFO_SZ; idev->fifo_size = ALTR_I2C_DFLT_FIFO_SZ;
} }
val = device_property_read_u32(idev->dev, "clock-frequency", ret = device_property_read_u32(idev->dev, "clock-frequency",
&idev->bus_clk_rate); &idev->bus_clk_rate);
if (val) { if (ret) {
dev_err(&pdev->dev, "Default to 100kHz\n"); dev_err(&pdev->dev, "Default to 100kHz\n");
idev->bus_clk_rate = 100000; /* default clock rate */ idev->bus_clk_rate = 100000; /* default clock rate */
} }


@ -349,12 +349,12 @@ static int amd_mp2_pci_probe(struct pci_dev *pci_dev,
if (!privdata) if (!privdata)
return -ENOMEM; return -ENOMEM;
privdata->pci_dev = pci_dev;
rc = amd_mp2_pci_init(privdata, pci_dev); rc = amd_mp2_pci_init(privdata, pci_dev);
if (rc) if (rc)
return rc; return rc;
mutex_init(&privdata->c2p_lock); mutex_init(&privdata->c2p_lock);
privdata->pci_dev = pci_dev;
pm_runtime_set_autosuspend_delay(&pci_dev->dev, 1000); pm_runtime_set_autosuspend_delay(&pci_dev->dev, 1000);
pm_runtime_use_autosuspend(&pci_dev->dev); pm_runtime_use_autosuspend(&pci_dev->dev);


@ -603,6 +603,7 @@ static irqreturn_t aspeed_i2c_bus_irq(int irq, void *dev_id)
/* Ack all interrupts except for Rx done */ /* Ack all interrupts except for Rx done */
writel(irq_received & ~ASPEED_I2CD_INTR_RX_DONE, writel(irq_received & ~ASPEED_I2CD_INTR_RX_DONE,
bus->base + ASPEED_I2C_INTR_STS_REG); bus->base + ASPEED_I2C_INTR_STS_REG);
readl(bus->base + ASPEED_I2C_INTR_STS_REG);
irq_remaining = irq_received; irq_remaining = irq_received;
#if IS_ENABLED(CONFIG_I2C_SLAVE) #if IS_ENABLED(CONFIG_I2C_SLAVE)
@ -645,9 +646,11 @@ static irqreturn_t aspeed_i2c_bus_irq(int irq, void *dev_id)
irq_received, irq_handled); irq_received, irq_handled);
/* Ack Rx done */ /* Ack Rx done */
if (irq_received & ASPEED_I2CD_INTR_RX_DONE) if (irq_received & ASPEED_I2CD_INTR_RX_DONE) {
writel(ASPEED_I2CD_INTR_RX_DONE, writel(ASPEED_I2CD_INTR_RX_DONE,
bus->base + ASPEED_I2C_INTR_STS_REG); bus->base + ASPEED_I2C_INTR_STS_REG);
readl(bus->base + ASPEED_I2C_INTR_STS_REG);
}
spin_unlock(&bus->lock); spin_unlock(&bus->lock);
return irq_remaining ? IRQ_NONE : IRQ_HANDLED; return irq_remaining ? IRQ_NONE : IRQ_HANDLED;
} }
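The two added readl() calls are a posted-write flush: reading back the interrupt status register forces the preceding writel() to reach the device before the ISR returns, so the ack cannot race the next interrupt. The idiom, sketched as a helper (name and signature are mine, assuming the driver's bus type):

static inline void aspeed_i2c_ack_irq(struct aspeed_i2c_bus *bus, u32 mask)
{
	writel(mask, bus->base + ASPEED_I2C_INTR_STS_REG);
	/* Read back to flush the posted MMIO write before returning */
	readl(bus->base + ASPEED_I2C_INTR_STS_REG);
}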


@ -541,7 +541,7 @@ static const struct iio_info ad7797_info = {
.read_raw = &ad7793_read_raw, .read_raw = &ad7793_read_raw,
.write_raw = &ad7793_write_raw, .write_raw = &ad7793_write_raw,
.write_raw_get_fmt = &ad7793_write_raw_get_fmt, .write_raw_get_fmt = &ad7793_write_raw_get_fmt,
.attrs = &ad7793_attribute_group, .attrs = &ad7797_attribute_group,
.validate_trigger = ad_sd_validate_trigger, .validate_trigger = ad_sd_validate_trigger,
}; };


@ -1367,8 +1367,30 @@ static unsigned int stm32_adc_dma_residue(struct stm32_adc *adc)
static void stm32_adc_dma_buffer_done(void *data) static void stm32_adc_dma_buffer_done(void *data)
{ {
struct iio_dev *indio_dev = data; struct iio_dev *indio_dev = data;
struct stm32_adc *adc = iio_priv(indio_dev);
int residue = stm32_adc_dma_residue(adc);
iio_trigger_poll_chained(indio_dev->trig); /*
* In DMA mode the trigger services of IIO are not used
* (e.g. no call to iio_trigger_poll).
* Calling the irq handler associated with the hardware trigger is not
* relevant, as the conversions have already been done. Data
* transfers are performed directly in the DMA callback instead.
* This implementation avoids calling a trigger irq handler that
* may sleep from an atomic context (the DMA irq handler context).
*/
dev_dbg(&indio_dev->dev, "%s bufi=%d\n", __func__, adc->bufi);
while (residue >= indio_dev->scan_bytes) {
u16 *buffer = (u16 *)&adc->rx_buf[adc->bufi];
iio_push_to_buffers(indio_dev, buffer);
residue -= indio_dev->scan_bytes;
adc->bufi += indio_dev->scan_bytes;
if (adc->bufi >= adc->rx_buf_sz)
adc->bufi = 0;
}
} }
static int stm32_adc_dma_start(struct iio_dev *indio_dev) static int stm32_adc_dma_start(struct iio_dev *indio_dev)
@ -1778,6 +1800,7 @@ static int stm32_adc_probe(struct platform_device *pdev)
{ {
struct iio_dev *indio_dev; struct iio_dev *indio_dev;
struct device *dev = &pdev->dev; struct device *dev = &pdev->dev;
irqreturn_t (*handler)(int irq, void *p) = NULL;
struct stm32_adc *adc; struct stm32_adc *adc;
int ret; int ret;
@ -1845,9 +1868,11 @@ static int stm32_adc_probe(struct platform_device *pdev)
if (ret < 0) if (ret < 0)
return ret; return ret;
if (!adc->dma_chan)
handler = &stm32_adc_trigger_handler;
ret = iio_triggered_buffer_setup(indio_dev, ret = iio_triggered_buffer_setup(indio_dev,
&iio_pollfunc_store_time, &iio_pollfunc_store_time, handler,
&stm32_adc_trigger_handler,
&stm32_adc_buffer_setup_ops); &stm32_adc_buffer_setup_ops);
if (ret) { if (ret) {
dev_err(&pdev->dev, "buffer setup failed\n"); dev_err(&pdev->dev, "buffer setup failed\n");


@ -29,7 +29,7 @@ struct ads8344 {
struct mutex lock; struct mutex lock;
u8 tx_buf ____cacheline_aligned; u8 tx_buf ____cacheline_aligned;
u16 rx_buf; u8 rx_buf[3];
}; };
#define ADS8344_VOLTAGE_CHANNEL(chan, si) \ #define ADS8344_VOLTAGE_CHANNEL(chan, si) \
@ -89,11 +89,11 @@ static int ads8344_adc_conversion(struct ads8344 *adc, int channel,
udelay(9); udelay(9);
ret = spi_read(spi, &adc->rx_buf, 2); ret = spi_read(spi, adc->rx_buf, sizeof(adc->rx_buf));
if (ret) if (ret)
return ret; return ret;
return adc->rx_buf; return adc->rx_buf[0] << 9 | adc->rx_buf[1] << 1 | adc->rx_buf[2] >> 7;
} }
static int ads8344_read_raw(struct iio_dev *iio, static int ads8344_read_raw(struct iio_dev *iio,
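The widened rx buffer and the new shift expression reflect how the ADS8344 clocks data out: the 16-bit result starts one bit-time into the byte that follows the control byte, so it straddles three bytes as 7 + 8 + 1 bits. A hedged, self-contained illustration (example bytes are mine):

static int ads8344_sample_from_bytes(const unsigned char rx[3])
{
	/* e.g. rx = {0x5A, 0x3C, 0x80} -> (0x5A << 9) | (0x3C << 1) | 1 = 0xB479 */
	return rx[0] << 9 | rx[1] << 1 | rx[2] >> 7;
}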


@ -102,6 +102,16 @@ static const unsigned int XADC_ZYNQ_UNMASK_TIMEOUT = 500;
#define XADC_FLAGS_BUFFERED BIT(0) #define XADC_FLAGS_BUFFERED BIT(0)
/*
* The XADC hardware supports a samplerate of up to 1 MSPS. Unfortunately it
* does not have a hardware FIFO, which means an interrupt is generated for
* each conversion sequence. At a 1 MSPS sample rate the CPU in the ZYNQ7000
* is so overloaded by the interrupts that it soft-locks up. For this reason
* the driver limits the maximum samplerate to 150 kSPS. At this rate the CPU
* is fairly busy, but still responsive.
*/
#define XADC_MAX_SAMPLERATE 150000
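For scale: one interrupt per conversion sequence at 1 MSPS is an interrupt every microsecond; capping at 150 kSPS stretches that to roughly 6.7 us. The rate itself comes from the clock divider, samplerate = dclk / (div * 26), which is what the new xadc_read_samplerate()/xadc_write_samplerate() below compute. A hedged sketch of the divider choice (helper is mine, assuming val > 0):

static unsigned int xadc_div_for_rate(unsigned long clk_rate, int val)
{
	unsigned int div;

	if (val > XADC_MAX_SAMPLERATE)
		val = XADC_MAX_SAMPLERATE;
	div = clk_rate / (val * 26);
	if (div < 2)
		div = 2;
	/* Never exceed the cap: round the divider up, not down */
	if (clk_rate / div / 26 > XADC_MAX_SAMPLERATE)
		div++;
	return div;
}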
static void xadc_write_reg(struct xadc *xadc, unsigned int reg, static void xadc_write_reg(struct xadc *xadc, unsigned int reg,
uint32_t val) uint32_t val)
{ {
@ -674,7 +684,7 @@ static int xadc_trigger_set_state(struct iio_trigger *trigger, bool state)
spin_lock_irqsave(&xadc->lock, flags); spin_lock_irqsave(&xadc->lock, flags);
xadc_read_reg(xadc, XADC_AXI_REG_IPIER, &val); xadc_read_reg(xadc, XADC_AXI_REG_IPIER, &val);
xadc_write_reg(xadc, XADC_AXI_REG_IPISR, val & XADC_AXI_INT_EOS); xadc_write_reg(xadc, XADC_AXI_REG_IPISR, XADC_AXI_INT_EOS);
if (state) if (state)
val |= XADC_AXI_INT_EOS; val |= XADC_AXI_INT_EOS;
else else
@ -722,13 +732,14 @@ static int xadc_power_adc_b(struct xadc *xadc, unsigned int seq_mode)
{ {
uint16_t val; uint16_t val;
/* Power down ADC-B when it is not needed. */
switch (seq_mode) { switch (seq_mode) {
case XADC_CONF1_SEQ_SIMULTANEOUS: case XADC_CONF1_SEQ_SIMULTANEOUS:
case XADC_CONF1_SEQ_INDEPENDENT: case XADC_CONF1_SEQ_INDEPENDENT:
val = XADC_CONF2_PD_ADC_B; val = 0;
break; break;
default: default:
val = 0; val = XADC_CONF2_PD_ADC_B;
break; break;
} }
@ -797,6 +808,16 @@ static int xadc_preenable(struct iio_dev *indio_dev)
if (ret) if (ret)
goto err; goto err;
/*
* In simultaneous mode the upper and lower aux channels are sampled at
* the same time. In this mode the upper 8 bits in the sequencer
* register are don't-care and the lower 8 bits control two channels
* each. As such we must set the bit if either the channel in the lower
* group or the one in the upper group is enabled.
*/
if (seq_mode == XADC_CONF1_SEQ_SIMULTANEOUS)
scan_mask = ((scan_mask >> 8) | scan_mask) & 0xff0000;
ret = xadc_write_adc_reg(xadc, XADC_REG_SEQ(1), scan_mask >> 16); ret = xadc_write_adc_reg(xadc, XADC_REG_SEQ(1), scan_mask >> 16);
if (ret) if (ret)
goto err; goto err;
@ -823,11 +844,27 @@ static const struct iio_buffer_setup_ops xadc_buffer_ops = {
.postdisable = &xadc_postdisable, .postdisable = &xadc_postdisable,
}; };
static int xadc_read_samplerate(struct xadc *xadc)
{
unsigned int div;
uint16_t val16;
int ret;
ret = xadc_read_adc_reg(xadc, XADC_REG_CONF2, &val16);
if (ret)
return ret;
div = (val16 & XADC_CONF2_DIV_MASK) >> XADC_CONF2_DIV_OFFSET;
if (div < 2)
div = 2;
return xadc_get_dclk_rate(xadc) / div / 26;
}
static int xadc_read_raw(struct iio_dev *indio_dev, static int xadc_read_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan, int *val, int *val2, long info) struct iio_chan_spec const *chan, int *val, int *val2, long info)
{ {
struct xadc *xadc = iio_priv(indio_dev); struct xadc *xadc = iio_priv(indio_dev);
unsigned int div;
uint16_t val16; uint16_t val16;
int ret; int ret;
@ -880,41 +917,31 @@ static int xadc_read_raw(struct iio_dev *indio_dev,
*val = -((273150 << 12) / 503975); *val = -((273150 << 12) / 503975);
return IIO_VAL_INT; return IIO_VAL_INT;
case IIO_CHAN_INFO_SAMP_FREQ: case IIO_CHAN_INFO_SAMP_FREQ:
ret = xadc_read_adc_reg(xadc, XADC_REG_CONF2, &val16); ret = xadc_read_samplerate(xadc);
if (ret) if (ret < 0)
return ret; return ret;
div = (val16 & XADC_CONF2_DIV_MASK) >> XADC_CONF2_DIV_OFFSET; *val = ret;
if (div < 2)
div = 2;
*val = xadc_get_dclk_rate(xadc) / div / 26;
return IIO_VAL_INT; return IIO_VAL_INT;
default: default:
return -EINVAL; return -EINVAL;
} }
} }
static int xadc_write_raw(struct iio_dev *indio_dev, static int xadc_write_samplerate(struct xadc *xadc, int val)
struct iio_chan_spec const *chan, int val, int val2, long info)
{ {
struct xadc *xadc = iio_priv(indio_dev);
unsigned long clk_rate = xadc_get_dclk_rate(xadc); unsigned long clk_rate = xadc_get_dclk_rate(xadc);
unsigned int div; unsigned int div;
if (!clk_rate) if (!clk_rate)
return -EINVAL; return -EINVAL;
if (info != IIO_CHAN_INFO_SAMP_FREQ)
return -EINVAL;
if (val <= 0) if (val <= 0)
return -EINVAL; return -EINVAL;
/* Max. 150 kSPS */ /* Max. 150 kSPS */
if (val > 150000) if (val > XADC_MAX_SAMPLERATE)
val = 150000; val = XADC_MAX_SAMPLERATE;
val *= 26; val *= 26;
@ -927,7 +954,7 @@ static int xadc_write_raw(struct iio_dev *indio_dev,
* limit. * limit.
*/ */
div = clk_rate / val; div = clk_rate / val;
if (clk_rate / div / 26 > 150000) if (clk_rate / div / 26 > XADC_MAX_SAMPLERATE)
div++; div++;
if (div < 2) if (div < 2)
div = 2; div = 2;
@ -938,6 +965,17 @@ static int xadc_write_raw(struct iio_dev *indio_dev,
div << XADC_CONF2_DIV_OFFSET); div << XADC_CONF2_DIV_OFFSET);
} }
static int xadc_write_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan, int val, int val2, long info)
{
struct xadc *xadc = iio_priv(indio_dev);
if (info != IIO_CHAN_INFO_SAMP_FREQ)
return -EINVAL;
return xadc_write_samplerate(xadc, val);
}
static const struct iio_event_spec xadc_temp_events[] = { static const struct iio_event_spec xadc_temp_events[] = {
{ {
.type = IIO_EV_TYPE_THRESH, .type = IIO_EV_TYPE_THRESH,
@ -1225,6 +1263,21 @@ static int xadc_probe(struct platform_device *pdev)
if (ret) if (ret)
goto err_free_samplerate_trigger; goto err_free_samplerate_trigger;
/*
* Make sure not to exceed the maximum samplerate since otherwise the
* resulting interrupt storm will soft-lock the system.
*/
if (xadc->ops->flags & XADC_FLAGS_BUFFERED) {
ret = xadc_read_samplerate(xadc);
if (ret < 0)
goto err_free_samplerate_trigger;
if (ret > XADC_MAX_SAMPLERATE) {
ret = xadc_write_samplerate(xadc, XADC_MAX_SAMPLERATE);
if (ret < 0)
goto err_free_samplerate_trigger;
}
}
ret = request_irq(xadc->irq, xadc->ops->interrupt_handler, 0, ret = request_irq(xadc->irq, xadc->ops->interrupt_handler, 0,
dev_name(&pdev->dev), indio_dev); dev_name(&pdev->dev), indio_dev);
if (ret) if (ret)


@ -92,7 +92,7 @@ int st_sensors_set_odr(struct iio_dev *indio_dev, unsigned int odr)
struct st_sensor_odr_avl odr_out = {0, 0}; struct st_sensor_odr_avl odr_out = {0, 0};
struct st_sensor_data *sdata = iio_priv(indio_dev); struct st_sensor_data *sdata = iio_priv(indio_dev);
if (!sdata->sensor_settings->odr.addr) if (!sdata->sensor_settings->odr.mask)
return 0; return 0;
err = st_sensors_match_odr(sdata->sensor_settings, odr, &odr_out); err = st_sensors_match_odr(sdata->sensor_settings, odr, &odr_out);


@@ -597,18 +597,6 @@ static int cm_init_av_by_path(struct sa_path_rec *path,
 	return 0;
 }
 
-static int cm_alloc_id(struct cm_id_private *cm_id_priv)
-{
-	int err;
-	u32 id;
-
-	err = xa_alloc_cyclic_irq(&cm.local_id_table, &id, cm_id_priv,
-				  xa_limit_32b, &cm.local_id_next, GFP_KERNEL);
-
-	cm_id_priv->id.local_id = (__force __be32)id ^ cm.random_id_operand;
-	return err;
-}
-
 static u32 cm_local_id(__be32 local_id)
 {
 	return (__force u32) (local_id ^ cm.random_id_operand);
@@ -862,6 +850,7 @@ struct ib_cm_id *ib_create_cm_id(struct ib_device *device,
 				 void *context)
 {
 	struct cm_id_private *cm_id_priv;
+	u32 id;
 	int ret;
 
 	cm_id_priv = kzalloc(sizeof *cm_id_priv, GFP_KERNEL);
@@ -873,9 +862,6 @@ struct ib_cm_id *ib_create_cm_id(struct ib_device *device,
 	cm_id_priv->id.cm_handler = cm_handler;
 	cm_id_priv->id.context = context;
 	cm_id_priv->id.remote_cm_qpn = 1;
-	ret = cm_alloc_id(cm_id_priv);
-	if (ret)
-		goto error;
 
 	spin_lock_init(&cm_id_priv->lock);
 	init_completion(&cm_id_priv->comp);
@@ -884,11 +870,20 @@ struct ib_cm_id *ib_create_cm_id(struct ib_device *device,
 	INIT_LIST_HEAD(&cm_id_priv->altr_list);
 	atomic_set(&cm_id_priv->work_count, -1);
 	atomic_set(&cm_id_priv->refcount, 1);
+
+	ret = xa_alloc_cyclic_irq(&cm.local_id_table, &id, NULL, xa_limit_32b,
+				  &cm.local_id_next, GFP_KERNEL);
+	if (ret < 0)
+		goto error;
+	cm_id_priv->id.local_id = (__force __be32)id ^ cm.random_id_operand;
+	xa_store_irq(&cm.local_id_table, cm_local_id(cm_id_priv->id.local_id),
+		     cm_id_priv, GFP_KERNEL);
+
 	return &cm_id_priv->id;
 
 error:
 	kfree(cm_id_priv);
-	return ERR_PTR(-ENOMEM);
+	return ERR_PTR(ret);
 }
 EXPORT_SYMBOL(ib_create_cm_id);
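The ordering in the hunk above is the standard xarray reserve-then-publish idiom: the ID is allocated with a NULL entry so a concurrent lookup cannot observe a half-initialised structure, and the real pointer is stored only once setup is complete. A minimal hedged sketch of the idiom (my_table/my_obj are hypothetical names; xa_alloc_cyclic_irq and xa_store_irq are the real <linux/xarray.h> API):

#include <linux/xarray.h>
#include <linux/slab.h>
#include <linux/err.h>

static DEFINE_XARRAY_ALLOC(my_table);	/* hypothetical ID table */
static u32 my_next;

struct my_obj { u32 id; /* ... */ };

static struct my_obj *my_obj_create(void)
{
	struct my_obj *obj;
	u32 id;
	int ret;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return ERR_PTR(-ENOMEM);

	/* Reserve the ID with a NULL entry: lookups treat NULL as absent. */
	ret = xa_alloc_cyclic_irq(&my_table, &id, NULL, xa_limit_32b,
				  &my_next, GFP_KERNEL);
	if (ret < 0) {
		kfree(obj);
		return ERR_PTR(ret);
	}
	obj->id = id;

	/* ... finish initialising obj ... */

	/* Publish only when the object is ready to be found. */
	xa_store_irq(&my_table, id, obj, GFP_KERNEL);
	return obj;
}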

@@ -362,7 +362,7 @@ lookup_get_fd_uobject(const struct uverbs_api_object *obj,
 	 * and the caller is expected to ensure that uverbs_close_fd is never
 	 * done while a call top lookup is possible.
 	 */
-	if (f->f_op != fd_type->fops) {
+	if (f->f_op != fd_type->fops || uobject->ufile != ufile) {
 		fput(f);
 		return ERR_PTR(-EBADF);
 	}
@@ -689,7 +689,6 @@ void rdma_lookup_put_uobject(struct ib_uobject *uobj,
 			     enum rdma_lookup_mode mode)
 {
 	assert_uverbs_usecnt(uobj, mode);
-	uobj->uapi_object->type_class->lookup_put(uobj, mode);
 	/*
 	 * In order to unlock an object, either decrease its usecnt for
 	 * read access or zero it in case of exclusive access. See
@@ -706,6 +705,7 @@ void rdma_lookup_put_uobject(struct ib_uobject *uobj,
 		break;
 	}
 
+	uobj->uapi_object->type_class->lookup_put(uobj, mode);
 	/* Pairs with the kref obtained by type->lookup_get */
 	uverbs_uobject_put(uobj);
 }
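The reordering above follows the general teardown rule: finish every access to an object before dropping the reference that may free it. As a hedged, generic sketch (a hypothetical my_obj, not the uverbs types; kref and mutex are the real kernel primitives):

#include <linux/kref.h>
#include <linux/mutex.h>
#include <linux/slab.h>

struct my_obj {			/* hypothetical refcounted object */
	struct kref ref;
	struct mutex lock;
};

static void my_obj_free(struct kref *ref)
{
	kfree(container_of(ref, struct my_obj, ref));
}

/* WRONG: kref_put() may free obj, making the unlock a use-after-free. */
static void release_bad(struct my_obj *obj)
{
	kref_put(&obj->ref, my_obj_free);
	mutex_unlock(&obj->lock);
}

/* RIGHT: all accesses complete before the final reference drop. */
static void release_good(struct my_obj *obj)
{
	mutex_unlock(&obj->lock);
	kref_put(&obj->ref, my_obj_free);
}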

@@ -1490,8 +1490,9 @@ static int __mlx4_ib_create_default_rules(
 	int i;
 
 	for (i = 0; i < ARRAY_SIZE(pdefault_rules->rules_create_list); i++) {
+		union ib_flow_spec ib_spec = {};
 		int ret;
-		union ib_flow_spec ib_spec;
 
 		switch (pdefault_rules->rules_create_list[i]) {
 		case 0:
 			/* no rule */

@@ -5375,7 +5375,9 @@ static void to_rdma_ah_attr(struct mlx5_ib_dev *ibdev,
 	rdma_ah_set_path_bits(ah_attr, path->grh_mlid & 0x7f);
 	rdma_ah_set_static_rate(ah_attr,
 				path->static_rate ? path->static_rate - 5 : 0);
-	if (path->grh_mlid & (1 << 7)) {
+
+	if (path->grh_mlid & (1 << 7) ||
+	    ah_attr->type == RDMA_AH_ATTR_TYPE_ROCE) {
 		u32 tc_fl = be32_to_cpu(path->tclass_flowlabel);
 
 		rdma_ah_set_grh(ah_attr, NULL,

@@ -2848,7 +2848,7 @@ static int __init parse_amd_iommu_intr(char *str)
 {
 	for (; *str; ++str) {
 		if (strncmp(str, "legacy", 6) == 0) {
-			amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY;
+			amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY_GA;
 			break;
 		}
 		if (strncmp(str, "vapic", 5) == 0) {

@@ -788,8 +788,11 @@ static int qcom_iommu_device_probe(struct platform_device *pdev)
 	qcom_iommu->dev = dev;
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (res)
+	if (res) {
 		qcom_iommu->local_base = devm_ioremap_resource(dev, res);
+		if (IS_ERR(qcom_iommu->local_base))
+			return PTR_ERR(qcom_iommu->local_base);
+	}
 
 	qcom_iommu->iface_clk = devm_clk_get(dev, "iface");
 	if (IS_ERR(qcom_iommu->iface_clk)) {

@@ -576,10 +576,12 @@ static struct pgpath *__map_bio(struct multipath *m, struct bio *bio)
 
 	/* Do we need to select a new pgpath? */
 	pgpath = READ_ONCE(m->current_pgpath);
-	queue_io = test_bit(MPATHF_QUEUE_IO, &m->flags);
-	if (!pgpath || !queue_io)
+	if (!pgpath || !test_bit(MPATHF_QUEUE_IO, &m->flags))
 		pgpath = choose_pgpath(m, bio->bi_iter.bi_size);
 
+	/* MPATHF_QUEUE_IO might have been cleared by choose_pgpath. */
+	queue_io = test_bit(MPATHF_QUEUE_IO, &m->flags);
+
 	if ((pgpath && queue_io) ||
 	    (!pgpath && test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))) {
 		/* Queue for the daemon to resubmit */

@@ -435,7 +435,7 @@ int verity_fec_decode(struct dm_verity *v, struct dm_verity_io *io,
 	fio->level++;
 
 	if (type == DM_VERITY_BLOCK_TYPE_METADATA)
-		block += v->data_blocks;
+		block = block - v->hash_start + v->data_blocks;
 
 	/*
 	 * For RS(M, N), the continuous FEC data is divided into blocks of N

@@ -879,6 +879,24 @@ static int writecache_alloc_entries(struct dm_writecache *wc)
 	return 0;
 }
 
+static int writecache_read_metadata(struct dm_writecache *wc, sector_t n_sectors)
+{
+	struct dm_io_region region;
+	struct dm_io_request req;
+
+	region.bdev = wc->ssd_dev->bdev;
+	region.sector = wc->start_sector;
+	region.count = n_sectors;
+	req.bi_op = REQ_OP_READ;
+	req.bi_op_flags = REQ_SYNC;
+	req.mem.type = DM_IO_VMA;
+	req.mem.ptr.vma = (char *)wc->memory_map;
+	req.client = wc->dm_io;
+	req.notify.fn = NULL;
+
+	return dm_io(&req, 1, &region, NULL);
+}
+
 static void writecache_resume(struct dm_target *ti)
 {
 	struct dm_writecache *wc = ti->private;
@@ -889,8 +907,18 @@ static void writecache_resume(struct dm_target *ti)
 
 	wc_lock(wc);
 
-	if (WC_MODE_PMEM(wc))
+	if (WC_MODE_PMEM(wc)) {
 		persistent_memory_invalidate_cache(wc->memory_map, wc->memory_map_size);
+	} else {
+		r = writecache_read_metadata(wc, wc->metadata_sectors);
+		if (r) {
+			size_t sb_entries_offset;
+			writecache_error(wc, r, "unable to read metadata: %d", r);
+			sb_entries_offset = offsetof(struct wc_memory_superblock, entries);
+			memset((char *)wc->memory_map + sb_entries_offset, -1,
+			       (wc->metadata_sectors << SECTOR_SHIFT) - sb_entries_offset);
+		}
+	}
 
 	wc->tree = RB_ROOT;
 	INIT_LIST_HEAD(&wc->lru);
@@ -1972,6 +2000,12 @@ static int writecache_ctr(struct dm_target *ti, unsigned argc, char **argv)
 		ti->error = "Invalid block size";
 		goto bad;
 	}
+	if (wc->block_size < bdev_logical_block_size(wc->dev->bdev) ||
+	    wc->block_size < bdev_logical_block_size(wc->ssd_dev->bdev)) {
+		r = -EINVAL;
+		ti->error = "Block size is smaller than device logical block size";
+		goto bad;
+	}
 	wc->block_size_bits = __ffs(wc->block_size);
 
 	wc->max_writeback_jobs = MAX_WRITEBACK_JOBS;
@@ -2060,8 +2094,6 @@ invalid_optional:
 			goto bad;
 		}
 	} else {
-		struct dm_io_region region;
-		struct dm_io_request req;
 		size_t n_blocks, n_metadata_blocks;
 		uint64_t n_bitmap_bits;
 
@@ -2118,19 +2150,9 @@ invalid_optional:
 			goto bad;
 		}
 
-		region.bdev = wc->ssd_dev->bdev;
-		region.sector = wc->start_sector;
-		region.count = wc->metadata_sectors;
-		req.bi_op = REQ_OP_READ;
-		req.bi_op_flags = REQ_SYNC;
-		req.mem.type = DM_IO_VMA;
-		req.mem.ptr.vma = (char *)wc->memory_map;
-		req.client = wc->dm_io;
-		req.notify.fn = NULL;
-
-		r = dm_io(&req, 1, &region, NULL);
+		r = writecache_read_metadata(wc, wc->block_size >> SECTOR_SHIFT);
 		if (r) {
-			ti->error = "Unable to read metadata";
+			ti->error = "Unable to read first block of metadata";
 			goto bad;
 		}
 	}

@@ -236,22 +236,18 @@ int go7007_snd_init(struct go7007 *go)
 	gosnd->capturing = 0;
 	ret = snd_card_new(go->dev, index[dev], id[dev], THIS_MODULE, 0,
 			   &gosnd->card);
-	if (ret < 0) {
-		kfree(gosnd);
-		return ret;
-	}
+	if (ret < 0)
+		goto free_snd;
 
 	ret = snd_device_new(gosnd->card, SNDRV_DEV_LOWLEVEL, go,
 			     &go7007_snd_device_ops);
-	if (ret < 0) {
-		kfree(gosnd);
-		return ret;
-	}
+	if (ret < 0)
+		goto free_card;
 
 	ret = snd_pcm_new(gosnd->card, "go7007", 0, 0, 1, &gosnd->pcm);
-	if (ret < 0) {
-		snd_card_free(gosnd->card);
-		kfree(gosnd);
-		return ret;
-	}
+	if (ret < 0)
+		goto free_card;
 
 	strscpy(gosnd->card->driver, "go7007", sizeof(gosnd->card->driver));
 	strscpy(gosnd->card->shortname, go->name, sizeof(gosnd->card->driver));
 	strscpy(gosnd->card->longname, gosnd->card->shortname,
@@ -262,11 +258,8 @@ int go7007_snd_init(struct go7007 *go)
 			&go7007_snd_capture_ops);
 
 	ret = snd_card_register(gosnd->card);
-	if (ret < 0) {
-		snd_card_free(gosnd->card);
-		kfree(gosnd);
-		return ret;
-	}
+	if (ret < 0)
+		goto free_card;
 
 	gosnd->substream = NULL;
 	go->snd_context = gosnd;
@@ -274,6 +267,12 @@ int go7007_snd_init(struct go7007 *go)
 	++dev;
 
 	return 0;
+
+free_card:
+	snd_card_free(gosnd->card);
+free_snd:
+	kfree(gosnd);
+	return ret;
 }
 EXPORT_SYMBOL(go7007_snd_init);
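The conversion above is the conventional kernel goto-unwind pattern: one label per acquired resource, in reverse acquisition order, so every failure path releases exactly what was set up so far. A hedged minimal sketch of the shape (hypothetical two-allocation init, not the go7007 code):

#include <linux/slab.h>

/* Hypothetical two-resource init showing the unwind-label pattern. */
static int setup_example(void **a, void **b)
{
	int ret;

	*a = kzalloc(64, GFP_KERNEL);
	if (!*a)
		return -ENOMEM;	/* nothing to unwind yet */

	*b = kzalloc(64, GFP_KERNEL);
	if (!*b) {
		ret = -ENOMEM;
		goto free_a;	/* only 'a' exists at this point */
	}

	return 0;	/* later steps would 'goto free_b' on failure */

free_a:
	kfree(*a);
	*a = NULL;
	return ret;
}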

@@ -395,7 +395,7 @@ int intel_lpss_probe(struct device *dev,
 	if (!lpss)
 		return -ENOMEM;
 
-	lpss->priv = devm_ioremap(dev, info->mem->start + LPSS_PRIV_OFFSET,
+	lpss->priv = devm_ioremap_uc(dev, info->mem->start + LPSS_PRIV_OFFSET,
 				  LPSS_PRIV_SIZE);
 	if (!lpss->priv)
 		return -ENOMEM;

@@ -5,6 +5,7 @@
 #include <linux/delay.h>
 #include <linux/highmem.h>
 #include <linux/io.h>
+#include <linux/iopoll.h>
 #include <linux/module.h>
 #include <linux/dma-mapping.h>
 #include <linux/slab.h>
@@ -343,12 +344,16 @@ static int cqhci_enable(struct mmc_host *mmc, struct mmc_card *card)
 /* CQHCI is idle and should halt immediately, so set a small timeout */
 #define CQHCI_OFF_TIMEOUT	100
 
+static u32 cqhci_read_ctl(struct cqhci_host *cq_host)
+{
+	return cqhci_readl(cq_host, CQHCI_CTL);
+}
+
 static void cqhci_off(struct mmc_host *mmc)
 {
 	struct cqhci_host *cq_host = mmc->cqe_private;
-	ktime_t timeout;
-	bool timed_out;
 	u32 reg;
+	int err;
 
 	if (!cq_host->enabled || !mmc->cqe_on || cq_host->recovery_halt)
 		return;
@@ -358,15 +363,9 @@ static void cqhci_off(struct mmc_host *mmc)
 
 	cqhci_writel(cq_host, CQHCI_HALT, CQHCI_CTL);
 
-	timeout = ktime_add_us(ktime_get(), CQHCI_OFF_TIMEOUT);
-	while (1) {
-		timed_out = ktime_compare(ktime_get(), timeout) > 0;
-		reg = cqhci_readl(cq_host, CQHCI_CTL);
-		if ((reg & CQHCI_HALT) || timed_out)
-			break;
-	}
-
-	if (timed_out)
+	err = readx_poll_timeout(cqhci_read_ctl, cq_host, reg,
+				 reg & CQHCI_HALT, 0, CQHCI_OFF_TIMEOUT);
+	if (err < 0)
 		pr_err("%s: cqhci: CQE stuck on\n", mmc_hostname(mmc));
 	else
 		pr_debug("%s: cqhci: CQE off\n", mmc_hostname(mmc));
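readx_poll_timeout() from <linux/iopoll.h> packages exactly the open-coded loop deleted above: readx_poll_timeout(op, addr, val, cond, sleep_us, timeout_us) repeatedly evaluates val = op(addr) until cond is true or timeout_us elapses, returning 0 on success and -ETIMEDOUT otherwise (sleep_us == 0 busy-polls). For memory-mapped registers the readl_poll_timeout() variant is the common form; a hedged sketch against a hypothetical status register:

#include <linux/iopoll.h>
#include <linux/io.h>
#include <linux/bits.h>

#define MY_STATUS_REG	0x04		/* hypothetical register offset */
#define MY_STATUS_READY	BIT(0)		/* hypothetical ready bit */

static int my_wait_ready(void __iomem *base)
{
	u32 val;

	/* Poll every 10 us, give up after 1000 us. */
	return readl_poll_timeout(base + MY_STATUS_REG, val,
				  val & MY_STATUS_READY, 10, 1000);
}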

@@ -357,14 +357,6 @@ static void meson_mx_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
 	meson_mx_mmc_start_cmd(mmc, mrq->cmd);
 }
 
-static int meson_mx_mmc_card_busy(struct mmc_host *mmc)
-{
-	struct meson_mx_mmc_host *host = mmc_priv(mmc);
-	u32 irqc = readl(host->base + MESON_MX_SDIO_IRQC);
-
-	return !!(irqc & MESON_MX_SDIO_IRQC_FORCE_DATA_DAT_MASK);
-}
-
 static void meson_mx_mmc_read_response(struct mmc_host *mmc,
 				       struct mmc_command *cmd)
 {
@@ -506,7 +498,6 @@ static void meson_mx_mmc_timeout(struct timer_list *t)
 static struct mmc_host_ops meson_mx_mmc_ops = {
 	.request	= meson_mx_mmc_request,
 	.set_ios	= meson_mx_mmc_set_ios,
-	.card_busy	= meson_mx_mmc_card_busy,
 	.get_cd		= mmc_gpio_get_cd,
 	.get_ro		= mmc_gpio_get_ro,
 };
@@ -570,7 +561,7 @@ static int meson_mx_mmc_add_host(struct meson_mx_mmc_host *host)
 	mmc->f_max = clk_round_rate(host->cfg_div_clk,
 				    clk_get_rate(host->parent_clk));
 
-	mmc->caps |= MMC_CAP_ERASE | MMC_CAP_CMD23;
+	mmc->caps |= MMC_CAP_ERASE | MMC_CAP_CMD23 | MMC_CAP_WAIT_WHILE_BUSY;
 	mmc->ops = &meson_mx_mmc_ops;
 
 	ret = mmc_of_parse(mmc);

@@ -1946,6 +1946,8 @@ static int sdhci_msm_probe(struct platform_device *pdev)
 		goto clk_disable;
 	}
 
+	msm_host->mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY | MMC_CAP_NEED_RSP_BUSY;
+
 	pm_runtime_get_noresume(&pdev->dev);
 	pm_runtime_set_active(&pdev->dev);
 	pm_runtime_enable(&pdev->dev);

@@ -601,6 +601,9 @@ static int intel_select_drive_strength(struct mmc_card *card,
 	struct sdhci_pci_slot *slot = sdhci_priv(host);
 	struct intel_host *intel_host = sdhci_pci_priv(slot);
 
+	if (!(mmc_driver_type_mask(intel_host->drv_strength) & card_drv))
+		return 0;
+
 	return intel_host->drv_strength;
 }

@@ -235,6 +235,16 @@ static void xenon_voltage_switch(struct sdhci_host *host)
 {
 	/* Wait for 5ms after set 1.8V signal enable bit */
 	usleep_range(5000, 5500);
+
+	/*
+	 * For some reason the controller's Host Control2 register reports
+	 * the bit representing 1.8V signaling as 0 when read after it was
+	 * written as 1. Subsequent read reports 1.
+	 *
+	 * Since this may cause some issues, do an empty read of the Host
+	 * Control2 register here to circumvent this.
+	 */
+	sdhci_readw(host, SDHCI_HOST_CONTROL2);
 }
 
 static const struct sdhci_ops sdhci_xenon_ops = {

@@ -1425,6 +1425,10 @@ static int b53_arl_rw_op(struct b53_device *dev, unsigned int op)
 		reg |= ARLTBL_RW;
 	else
 		reg &= ~ARLTBL_RW;
+	if (dev->vlan_enabled)
+		reg &= ~ARLTBL_IVL_SVL_SELECT;
+	else
+		reg |= ARLTBL_IVL_SVL_SELECT;
 	b53_write8(dev, B53_ARLIO_PAGE, B53_ARLTBL_RW_CTRL, reg);
 
 	return b53_arl_op_wait(dev);
@@ -1434,6 +1438,7 @@ static int b53_arl_read(struct b53_device *dev, u64 mac,
 			u16 vid, struct b53_arl_entry *ent, u8 *idx,
 			bool is_valid)
 {
+	DECLARE_BITMAP(free_bins, B53_ARLTBL_MAX_BIN_ENTRIES);
 	unsigned int i;
 	int ret;
 
@@ -1441,6 +1446,8 @@ static int b53_arl_read(struct b53_device *dev, u64 mac,
 	if (ret)
 		return ret;
 
+	bitmap_zero(free_bins, dev->num_arl_entries);
+
 	/* Read the bins */
 	for (i = 0; i < dev->num_arl_entries; i++) {
 		u64 mac_vid;
@@ -1452,13 +1459,24 @@ static int b53_arl_read(struct b53_device *dev, u64 mac,
 			   B53_ARLTBL_DATA_ENTRY(i), &fwd_entry);
 		b53_arl_to_entry(ent, mac_vid, fwd_entry);
 
-		if (!(fwd_entry & ARLTBL_VALID))
+		if (!(fwd_entry & ARLTBL_VALID)) {
+			set_bit(i, free_bins);
 			continue;
+		}
 		if ((mac_vid & ARLTBL_MAC_MASK) != mac)
 			continue;
+		if (dev->vlan_enabled &&
+		    ((mac_vid >> ARLTBL_VID_S) & ARLTBL_VID_MASK) != vid)
+			continue;
 		*idx = i;
+		return 0;
 	}
 
+	if (bitmap_weight(free_bins, dev->num_arl_entries) == 0)
+		return -ENOSPC;
+
+	*idx = find_first_bit(free_bins, dev->num_arl_entries);
+
 	return -ENOENT;
 }
 
@@ -1488,10 +1506,21 @@ static int b53_arl_op(struct b53_device *dev, int op, int port,
 	if (op)
 		return ret;
 
-	/* We could not find a matching MAC, so reset to a new entry */
-	if (ret) {
+	switch (ret) {
+	case -ENOSPC:
+		dev_dbg(dev->dev, "{%pM,%.4d} no space left in ARL\n",
+			addr, vid);
+		return is_valid ? ret : 0;
+	case -ENOENT:
+		/* We could not find a matching MAC, so reset to a new entry */
+		dev_dbg(dev->dev, "{%pM,%.4d} not found, using idx: %d\n",
+			addr, vid, idx);
 		fwd_entry = 0;
-		idx = 1;
+		break;
+	default:
+		dev_dbg(dev->dev, "{%pM,%.4d} found, using idx: %d\n",
+			addr, vid, idx);
+		break;
 	}
 
 	memset(&ent, 0, sizeof(ent));
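The free_bins logic above is a small kernel-bitmap idiom: record every empty slot while scanning, prefer an exact match, and only fall back to the first free slot when nothing matched. A hedged standalone sketch of the same pattern (slot_empty/slot_matches/NUM_SLOTS are hypothetical; DECLARE_BITMAP, set_bit, bitmap_weight and find_first_bit are the real <linux/bitmap.h> API):

#include <linux/bitmap.h>
#include <linux/errno.h>
#include <linux/types.h>

#define NUM_SLOTS 4			/* hypothetical table size */

static bool slot_matches(unsigned int i);	/* hypothetical predicates */
static bool slot_empty(unsigned int i);

static int find_slot(unsigned int *idx)
{
	DECLARE_BITMAP(free_slots, NUM_SLOTS);
	unsigned int i;

	bitmap_zero(free_slots, NUM_SLOTS);

	for (i = 0; i < NUM_SLOTS; i++) {
		if (slot_empty(i)) {
			set_bit(i, free_slots);	/* remember for later */
			continue;
		}
		if (slot_matches(i)) {
			*idx = i;
			return 0;		/* exact match wins */
		}
	}

	if (bitmap_weight(free_slots, NUM_SLOTS) == 0)
		return -ENOSPC;			/* full: caller must handle */

	*idx = find_first_bit(free_slots, NUM_SLOTS);
	return -ENOENT;				/* no match, but a free slot */
}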

@@ -292,6 +292,7 @@
 /* ARL Table Read/Write Register (8 bit) */
 #define B53_ARLTBL_RW_CTRL		0x00
 #define    ARLTBL_RW			BIT(0)
+#define    ARLTBL_IVL_SVL_SELECT	BIT(6)
 #define    ARLTBL_START_DONE		BIT(7)
 
 /* MAC Address Index Register (48 bit) */
@@ -304,7 +305,7 @@
  *
  * BCM5325 and BCM5365 share most definitions below
  */
-#define B53_ARLTBL_MAC_VID_ENTRY(n)	(0x10 * (n))
+#define B53_ARLTBL_MAC_VID_ENTRY(n)	((0x10 * (n)) + 0x10)
 #define   ARLTBL_MAC_MASK		0xffffffffffffULL
 #define   ARLTBL_VID_S			48
 #define   ARLTBL_VID_MASK_25		0xff
@@ -316,13 +317,16 @@
 #define   ARLTBL_VALID_25		BIT(63)
 
 /* ARL Table Data Entry N Registers (32 bit) */
-#define B53_ARLTBL_DATA_ENTRY(n)	((0x10 * (n)) + 0x08)
+#define B53_ARLTBL_DATA_ENTRY(n)	((0x10 * (n)) + 0x18)
 #define   ARLTBL_DATA_PORT_ID_MASK	0x1ff
 #define   ARLTBL_TC(tc)			((3 & tc) << 11)
 #define   ARLTBL_AGE			BIT(14)
 #define   ARLTBL_STATIC			BIT(15)
 #define   ARLTBL_VALID			BIT(16)
 
+/* Maximum number of bin entries in the ARL for all switches */
+#define B53_ARLTBL_MAX_BIN_ENTRIES	4
+
 /* ARL Search Control Register (8 bit) */
 #define B53_ARL_SRCH_CTL		0x50
 #define B53_ARL_SRCH_CTL_25		0x20
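The +0x10/+0x18 change shifts every per-bin register pair up by 16 bytes. Working the two macros through for four bins makes the new layout explicit; this is a hedged, standalone check in plain C of what the definitions above evaluate to, nothing more:

#include <stdio.h>

#define B53_ARLTBL_MAC_VID_ENTRY(n)	((0x10 * (n)) + 0x10)
#define B53_ARLTBL_DATA_ENTRY(n)	((0x10 * (n)) + 0x18)

int main(void)
{
	/* bin 0: MAC/VID at 0x10, data at 0x18; bin 1: 0x20/0x28; ... */
	for (int n = 0; n < 4; n++)
		printf("bin %d: mac_vid @ 0x%02x, data @ 0x%02x\n", n,
		       B53_ARLTBL_MAC_VID_ENTRY(n), B53_ARLTBL_DATA_ENTRY(n));
	return 0;
}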

@@ -666,7 +666,8 @@ static struct sk_buff *bcm_sysport_rx_refill(struct bcm_sysport_priv *priv,
 	dma_addr_t mapping;
 
 	/* Allocate a new SKB for a new packet */
-	skb = netdev_alloc_skb(priv->netdev, RX_BUF_LENGTH);
+	skb = __netdev_alloc_skb(priv->netdev, RX_BUF_LENGTH,
+				 GFP_ATOMIC | __GFP_NOWARN);
 	if (!skb) {
 		priv->mib.alloc_rx_buff_failed++;
 		netif_err(priv, rx_err, ndev, "SKB alloc failed\n");

@@ -6266,7 +6266,7 @@ static int bnxt_alloc_ctx_pg_tbls(struct bnxt *bp,
 	int rc;
 
 	if (!mem_size)
-		return 0;
+		return -EINVAL;
 
 	ctx_pg->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
 	if (ctx_pg->nr_pages > MAX_CTX_TOTAL_PAGES) {
@@ -9197,6 +9197,7 @@ static netdev_features_t bnxt_fix_features(struct net_device *dev,
 					   netdev_features_t features)
 {
 	struct bnxt *bp = netdev_priv(dev);
+	netdev_features_t vlan_features;
 
 	if ((features & NETIF_F_NTUPLE) && !bnxt_rfs_capable(bp))
 		features &= ~NETIF_F_NTUPLE;
@@ -9213,12 +9214,14 @@ static netdev_features_t bnxt_fix_features(struct net_device *dev,
 	/* Both CTAG and STAG VLAN accelaration on the RX side have to be
 	 * turned on or off together.
 	 */
-	if ((features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)) !=
-	    (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)) {
+	vlan_features = features & (NETIF_F_HW_VLAN_CTAG_RX |
+				    NETIF_F_HW_VLAN_STAG_RX);
+	if (vlan_features != (NETIF_F_HW_VLAN_CTAG_RX |
+			      NETIF_F_HW_VLAN_STAG_RX)) {
 		if (dev->features & NETIF_F_HW_VLAN_CTAG_RX)
 			features &= ~(NETIF_F_HW_VLAN_CTAG_RX |
 				      NETIF_F_HW_VLAN_STAG_RX);
-		else
+		else if (vlan_features)
 			features |= NETIF_F_HW_VLAN_CTAG_RX |
 				    NETIF_F_HW_VLAN_STAG_RX;
 	}
@@ -11049,12 +11052,15 @@ static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
 		}
 	}
 
-	if (result != PCI_ERS_RESULT_RECOVERED && netif_running(netdev))
-		dev_close(netdev);
+	if (result != PCI_ERS_RESULT_RECOVERED) {
+		if (netif_running(netdev))
+			dev_close(netdev);
+		pci_disable_device(pdev);
+	}
 
 	rtnl_unlock();
 
-	return PCI_ERS_RESULT_RECOVERED;
+	return result;
 }
 
 /**

@@ -39,7 +39,7 @@ static inline void bnxt_link_bp_to_dl(struct bnxt *bp, struct devlink *dl)
 #define NVM_OFF_DIS_GRE_VER_CHECK	171
 #define NVM_OFF_ENABLE_SRIOV		401
 
-#define BNXT_MSIX_VEC_MAX	1280
+#define BNXT_MSIX_VEC_MAX	512
 #define BNXT_MSIX_VEC_MIN_MAX	128
 
 enum bnxt_nvm_dir_type {

@@ -995,6 +995,8 @@ static void bcmgenet_get_ethtool_stats(struct net_device *dev,
 	if (netif_running(dev))
 		bcmgenet_update_mib_counters(priv);
 
+	dev->netdev_ops->ndo_get_stats(dev);
+
 	for (i = 0; i < BCMGENET_STATS_LEN; i++) {
 		const struct bcmgenet_stats *s;
 		char *p;
@@ -1694,7 +1696,8 @@ static struct sk_buff *bcmgenet_rx_refill(struct bcmgenet_priv *priv,
 	dma_addr_t mapping;
 
 	/* Allocate a new Rx skb */
-	skb = netdev_alloc_skb(priv->dev, priv->rx_buf_len + SKB_ALIGNMENT);
+	skb = __netdev_alloc_skb(priv->dev, priv->rx_buf_len + SKB_ALIGNMENT,
+				 GFP_ATOMIC | __GFP_NOWARN);
 	if (!skb) {
 		priv->mib.alloc_rx_buff_failed++;
 		netif_err(priv, rx_err, priv->dev,
@@ -3208,6 +3211,7 @@ static struct net_device_stats *bcmgenet_get_stats(struct net_device *dev)
 	dev->stats.rx_packets = rx_packets;
 	dev->stats.rx_errors = rx_errors;
 	dev->stats.rx_missed_errors = rx_errors;
+	dev->stats.rx_dropped = rx_dropped;
 
 	return &dev->stats;
 }

@@ -1054,9 +1054,9 @@ static void cudbg_t4_fwcache(struct cudbg_init *pdbg_init,
 	}
 }
 
-static unsigned long cudbg_mem_region_size(struct cudbg_init *pdbg_init,
-					   struct cudbg_error *cudbg_err,
-					   u8 mem_type)
+static int cudbg_mem_region_size(struct cudbg_init *pdbg_init,
+				 struct cudbg_error *cudbg_err,
+				 u8 mem_type, unsigned long *region_size)
 {
 	struct adapter *padap = pdbg_init->adap;
 	struct cudbg_meminfo mem_info;
@@ -1065,15 +1065,23 @@ static unsigned long cudbg_mem_region_size(struct cudbg_init *pdbg_init,
 
 	memset(&mem_info, 0, sizeof(struct cudbg_meminfo));
 	rc = cudbg_fill_meminfo(padap, &mem_info);
-	if (rc)
+	if (rc) {
+		cudbg_err->sys_err = rc;
 		return rc;
+	}
 
 	cudbg_t4_fwcache(pdbg_init, cudbg_err);
 	rc = cudbg_meminfo_get_mem_index(padap, &mem_info, mem_type, &mc_idx);
-	if (rc)
+	if (rc) {
+		cudbg_err->sys_err = rc;
 		return rc;
+	}
 
-	return mem_info.avail[mc_idx].limit - mem_info.avail[mc_idx].base;
+	if (region_size)
+		*region_size = mem_info.avail[mc_idx].limit -
+			       mem_info.avail[mc_idx].base;
+
+	return 0;
 }
 
 static int cudbg_collect_mem_region(struct cudbg_init *pdbg_init,
@@ -1081,7 +1089,12 @@ static int cudbg_collect_mem_region(struct cudbg_init *pdbg_init,
 				    struct cudbg_error *cudbg_err,
 				    u8 mem_type)
 {
-	unsigned long size = cudbg_mem_region_size(pdbg_init, cudbg_err, mem_type);
+	unsigned long size = 0;
+	int rc;
+
+	rc = cudbg_mem_region_size(pdbg_init, cudbg_err, mem_type, &size);
+	if (rc)
+		return rc;
 
 	return cudbg_read_fw_mem(pdbg_init, dbg_buff, mem_type, size,
 				 cudbg_err);
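The refactor above separates status from payload: the old signature returned an unsigned size, so a negative error code and a legitimate size had to share one value; the new one returns an int status and writes the size through an out-parameter only on success. A hedged generic sketch of the same API shape (compute_size and its key parameter are hypothetical):

#include <errno.h>

/* Before: an error code and a real size share the return value, so the
 * caller cannot tell a failure from a region of that size. */
unsigned long compute_size_old(int key);

/* After: the return value carries only status; the payload goes through
 * an out-parameter that is written only on success. */
static int compute_size(int key, unsigned long *size)
{
	if (key < 0)
		return -EINVAL;	/* status path, *size untouched */

	if (size)
		*size = 4096UL * (unsigned long)key;	/* payload path */

	return 0;
}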

@@ -311,32 +311,17 @@ static int cxgb4_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
  */
 static int cxgb4_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
 {
-	struct adapter *adapter = (struct adapter *)container_of(ptp,
-				   struct adapter, ptp_clock_info);
-	struct fw_ptp_cmd c;
+	struct adapter *adapter = container_of(ptp, struct adapter,
+					       ptp_clock_info);
 	u64 ns;
-	int err;
 
-	memset(&c, 0, sizeof(c));
-	c.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PTP_CMD) |
-				     FW_CMD_REQUEST_F |
-				     FW_CMD_READ_F |
-				     FW_PTP_CMD_PORTID_V(0));
-	c.retval_len16 = cpu_to_be32(FW_CMD_LEN16_V(sizeof(c) / 16));
-	c.u.ts.sc = FW_PTP_SC_GET_TIME;
-
-	err = t4_wr_mbox(adapter, adapter->mbox, &c, sizeof(c), &c);
-	if (err < 0) {
-		dev_err(adapter->pdev_dev,
-			"PTP: %s error %d\n", __func__, -err);
-		return err;
-	}
+	ns = t4_read_reg(adapter, T5_PORT_REG(0, MAC_PORT_PTP_SUM_LO_A));
+	ns |= (u64)t4_read_reg(adapter,
+			       T5_PORT_REG(0, MAC_PORT_PTP_SUM_HI_A)) << 32;
 
 	/* convert to timespec*/
-	ns = be64_to_cpu(c.u.ts.tm);
 	*ts = ns_to_timespec64(ns);
-
-	return err;
+	return 0;
 }
 
 /**
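Reading a 64-bit timestamp as two 32-bit halves raises the usual lo/hi tearing question: if the low word wraps between the two reads, the composed value can be off by 2^32 ns. The hunk above reads LO then HI directly, presumably because this hardware latches the pair on the LO read; the conventional software guard, shown here as a hedged generic sketch (read32 and the register map are hypothetical), rereads HI until it is stable:

#include <stdint.h>

/* Hypothetical 32-bit register accessor. */
uint32_t read32(unsigned int reg);

#define REG_LO 0x990	/* low 32 bits of the counter (hypothetical map) */
#define REG_HI 0x994	/* high 32 bits */

static uint64_t read_counter64(void)
{
	uint32_t hi, lo, hi2;

	do {
		hi = read32(REG_HI);
		lo = read32(REG_LO);
		hi2 = read32(REG_HI);
	} while (hi != hi2);	/* retry if LO wrapped between reads */

	return ((uint64_t)hi << 32) | lo;
}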

@@ -3748,7 +3748,7 @@ int t4_phy_fw_ver(struct adapter *adap, int *phy_fw_ver)
 		 FW_PARAMS_PARAM_Z_V(FW_PARAMS_PARAM_DEV_PHYFW_VERSION));
 	ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1,
 			      &param, &val);
-	if (ret < 0)
+	if (ret)
 		return ret;
 	*phy_fw_ver = val;
 	return 0;

@@ -1896,6 +1896,9 @@
 #define MAC_PORT_CFG2_A 0x818
 
+#define MAC_PORT_PTP_SUM_LO_A 0x990
+#define MAC_PORT_PTP_SUM_HI_A 0x994
+
 #define MPS_CMN_CTL_A 0x9000
 
 #define COUNTPAUSEMCRX_S 5
