This is the 6.6.21 stable release

-----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAmXoglwACgkQONu9yGCS
 aT4b5Q/+NTo9lSsob4MeA31IdXx/zxHSQKnREAGN5A4pevI6HJbnqJO6jQxZ2FKk
 2uB51Xjw/vhaQOljnNpkrCLrZBKoh/BuodxS7oYjYfDyba+DCoFMkqBtav9ZzlcF
 JcoPOOm4lEvTOozs/3ulmWx+ZZC1pnZt5JEmUTjL17AHdZUlugDg49WiZlEm3Le9
 ApmBN1t9VJz64wHsmbjgXiPUVb76I08knCBv/No1hrJmemK4Gn9lZn3WAIiSsWvp
 Dk4bzHmLleHbIW3EKc5VS1Jhp+m3/gGDP6Z4bN0B9HLu2qc8HT91rpC+giLzwIv2
 L1G8j9a5nyQEPropN+EUsJSzk0iez6o9DrSEDj2XmgPnxB0wsTwrmZvPjaU6M03Z
 QLMgFbiveeDSHjvt9RZvrIAMklW6PjZqaX4ZMvHxGJvvayFfaEQCb40/KwppvdiX
 C/U3fT3L0RdSRu/V/k+W/a8O65EJ/5ZLd3G/RNSiWhy/uZoSpfbSLADo/zUXqNc3
 bxSpVv7NAfA/riZlAeN/WXq2eZd0pQYF4BLK4UEJ2muPs2ZA42D3bGg19TgNRAgs
 mAovy5Liuj/iTsi9hXGymrMNp4plbM/Sie9qs+rwNJBXrYctkNPOAiR9ng381dX2
 1Icy4uek50/CqWocxTtblp8ch+dT68AKPZzkjB0RCvjneWVc2LI=
 =hAft
 -----END PGP SIGNATURE-----

Merge tag 'v6.6.21' into lf-6.6.y

This is the 6.6.21 stable release

* tag 'v6.6.21': (143 commits)
  Linux 6.6.21
  drm/nouveau: don't fini scheduler before entity flush
  selftests: mptcp: rm subflow with v4/v4mapped addr
  ...

Signed-off-by: Jason Liu <jason.hui.liu@nxp.com>

 Conflicts:
	drivers/dma/fsl-edma-common.c
	drivers/dma/fsl-qdma.c
	drivers/net/ethernet/freescale/fman/fman_memac.c
	drivers/phy/freescale/phy-fsl-imx8-mipi-dphy.c
Jason Liu 2024-03-13 14:24:30 +08:00
commit ffcb081b1e
149 changed files with 1834 additions and 861 deletions


@ -95,6 +95,9 @@ The kernel provides a function to invoke the buffer clearing:
mds_clear_cpu_buffers()
The CLEAR_CPU_BUFFERS macro can also be used in asm code late in the exit-to-user path.
Other than EFLAGS.ZF, this macro doesn't clobber any registers.
The mitigation is invoked on kernel/userspace, hypervisor/guest and C-state
(idle) transitions.
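For orientation, the arch/x86 hunks later in this diff show that mds_clear_cpu_buffers() boils down to a single VERW instruction with a memory operand, and that CLEAR_CPU_BUFFERS is the asm flavour of the same idiom. Below is a minimal user-space sketch of that idiom, not the kernel's exact code; the 0x18 selector value is only an illustrative assumption, not something taken from this series.

#include <stdio.h>

/*
 * Sketch of the VERW-based clearing idiom: a VERW with a memory operand
 * referencing a valid, writable data-segment selector is overloaded by
 * microcode to flush the affected CPU buffers.
 */
static inline void clear_cpu_buffers_sketch(void)
{
	static const unsigned short ds = 0x18;	/* assumed kernel-DS-like selector */

	/* "cc" because VERW writes EFLAGS.ZF, as the text above notes. */
	asm volatile("verw %[ds]" : : [ds] "m" (ds) : "cc");
}

int main(void)
{
	clear_cpu_buffers_sketch();
	printf("VERW executed\n");
	return 0;
}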
@ -138,17 +141,30 @@ Mitigation points
When transitioning from kernel to user space the CPU buffers are flushed
on affected CPUs when the mitigation is not disabled on the kernel
command line. The mitigation is enabled through the static key
mds_user_clear.
command line. The mitigation is enabled through the feature flag
X86_FEATURE_CLEAR_CPU_BUF.
The mitigation is invoked in prepare_exit_to_usermode() which covers
all but one of the kernel to user space transitions. The exception
is when we return from a Non Maskable Interrupt (NMI), which is
handled directly in do_nmi().
The mitigation is invoked just before transitioning to userspace after
user registers are restored. This is done to minimize the window in
which kernel data could be accessed after VERW e.g. via an NMI after
VERW.
(The reason that NMI is special is that prepare_exit_to_usermode() can
enable IRQs. In NMI context, NMIs are blocked, and we don't want to
enable IRQs with NMIs blocked.)
**Corner case not handled**
Interrupts returning to the kernel don't clear CPU buffers since the
exit-to-user path is expected to do that anyway. But there could be
a case when an NMI is generated in the kernel after the exit-to-user path
has cleared the buffers. This case is not handled, and an NMI returning to
the kernel doesn't clear CPU buffers because:
1. It is rare to get an NMI after VERW, but before returning to userspace.
2. For an unprivileged user, there is no known way to make that NMI
less rare or target it.
3. It would take a large number of these precisely-timed NMIs to mount
an actual attack. There's presumably not enough bandwidth.
4. The NMI in question occurs after a VERW, i.e. when user state is
restored and most interesting data is already scrubbed. What's left
is only the data that the NMI touches, and that may or may not be of
any interest.
2. C-State transition


@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 6
PATCHLEVEL = 6
SUBLEVEL = 20
SUBLEVEL = 21
EXTRAVERSION =
NAME = Hurr durr I'ma ninja sloth


@ -227,8 +227,19 @@ static int ctr_encrypt(struct skcipher_request *req)
src += blocks * AES_BLOCK_SIZE;
}
if (nbytes && walk.nbytes == walk.total) {
u8 buf[AES_BLOCK_SIZE];
u8 *d = dst;
if (unlikely(nbytes < AES_BLOCK_SIZE))
src = dst = memcpy(buf + sizeof(buf) - nbytes,
src, nbytes);
neon_aes_ctr_encrypt(dst, src, ctx->enc, ctx->key.rounds,
nbytes, walk.iv);
if (unlikely(nbytes < AES_BLOCK_SIZE))
memcpy(d, dst, nbytes);
nbytes = 0;
}
kernel_neon_end();
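The hunk above handles a final partial block by parking the tail at the end of a block-sized stack buffer, so that a full-block access which ends at the buffer's end stays inside valid memory, and then copying the short result back out. A self-contained sketch of that bounce-buffer pattern follows, with a trivial XOR standing in for neon_aes_ctr_encrypt(); the stand-in is purely illustrative, not the real cipher.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define BLOCK_SIZE 16

/* Stand-in for the real block cipher call: XOR with a fixed pad. */
static void process_block(uint8_t *dst, const uint8_t *src, size_t nbytes)
{
	for (size_t i = 0; i < nbytes; i++)
		dst[i] = src[i] ^ 0xAA;
}

/* Tail handling in the same spirit as the ctr_encrypt() hunk above. */
static void process_tail(uint8_t *dst, const uint8_t *src, size_t nbytes)
{
	uint8_t buf[BLOCK_SIZE];
	uint8_t *d = dst;

	if (nbytes < BLOCK_SIZE)
		/* Park the short tail at the end of a block-sized buffer. */
		src = dst = memcpy(buf + sizeof(buf) - nbytes, src, nbytes);

	process_block(dst, src, nbytes);

	if (nbytes < BLOCK_SIZE)
		memcpy(d, dst, nbytes);		/* copy the result back out */
}

int main(void)
{
	uint8_t in[4] = { 't', 'a', 'i', 'l' }, out[4] = { 0 };

	process_tail(out, in, sizeof(in));
	printf("%02x %02x %02x %02x\n", out[0], out[1], out[2], out[3]);
	return 0;
}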


@ -68,7 +68,7 @@ enum rtas_function_index {
RTAS_FNIDX__IBM_READ_SLOT_RESET_STATE,
RTAS_FNIDX__IBM_READ_SLOT_RESET_STATE2,
RTAS_FNIDX__IBM_REMOVE_PE_DMA_WINDOW,
RTAS_FNIDX__IBM_RESET_PE_DMA_WINDOWS,
RTAS_FNIDX__IBM_RESET_PE_DMA_WINDOW,
RTAS_FNIDX__IBM_SCAN_LOG_DUMP,
RTAS_FNIDX__IBM_SET_DYNAMIC_INDICATOR,
RTAS_FNIDX__IBM_SET_EEH_OPTION,
@ -163,7 +163,7 @@ typedef struct {
#define RTAS_FN_IBM_READ_SLOT_RESET_STATE rtas_fn_handle(RTAS_FNIDX__IBM_READ_SLOT_RESET_STATE)
#define RTAS_FN_IBM_READ_SLOT_RESET_STATE2 rtas_fn_handle(RTAS_FNIDX__IBM_READ_SLOT_RESET_STATE2)
#define RTAS_FN_IBM_REMOVE_PE_DMA_WINDOW rtas_fn_handle(RTAS_FNIDX__IBM_REMOVE_PE_DMA_WINDOW)
#define RTAS_FN_IBM_RESET_PE_DMA_WINDOWS rtas_fn_handle(RTAS_FNIDX__IBM_RESET_PE_DMA_WINDOWS)
#define RTAS_FN_IBM_RESET_PE_DMA_WINDOW rtas_fn_handle(RTAS_FNIDX__IBM_RESET_PE_DMA_WINDOW)
#define RTAS_FN_IBM_SCAN_LOG_DUMP rtas_fn_handle(RTAS_FNIDX__IBM_SCAN_LOG_DUMP)
#define RTAS_FN_IBM_SET_DYNAMIC_INDICATOR rtas_fn_handle(RTAS_FNIDX__IBM_SET_DYNAMIC_INDICATOR)
#define RTAS_FN_IBM_SET_EEH_OPTION rtas_fn_handle(RTAS_FNIDX__IBM_SET_EEH_OPTION)


@ -310,8 +310,13 @@ static struct rtas_function rtas_function_table[] __ro_after_init = {
[RTAS_FNIDX__IBM_REMOVE_PE_DMA_WINDOW] = {
.name = "ibm,remove-pe-dma-window",
},
[RTAS_FNIDX__IBM_RESET_PE_DMA_WINDOWS] = {
.name = "ibm,reset-pe-dma-windows",
[RTAS_FNIDX__IBM_RESET_PE_DMA_WINDOW] = {
/*
* Note: PAPR+ v2.13 7.3.31.4.1 spells this as
* "ibm,reset-pe-dma-windows" (plural), but RTAS
* implementations use the singular form in practice.
*/
.name = "ibm,reset-pe-dma-window",
},
[RTAS_FNIDX__IBM_SCAN_LOG_DUMP] = {
.name = "ibm,scan-log-dump",


@ -574,29 +574,6 @@ static void iommu_table_setparms(struct pci_controller *phb,
struct iommu_table_ops iommu_table_lpar_multi_ops;
/*
* iommu_table_setparms_lpar
*
* Function: On pSeries LPAR systems, return TCE table info, given a pci bus.
*/
static void iommu_table_setparms_lpar(struct pci_controller *phb,
struct device_node *dn,
struct iommu_table *tbl,
struct iommu_table_group *table_group,
const __be32 *dma_window)
{
unsigned long offset, size, liobn;
of_parse_dma_window(dn, dma_window, &liobn, &offset, &size);
iommu_table_setparms_common(tbl, phb->bus->number, liobn, offset, size, IOMMU_PAGE_SHIFT_4K, NULL,
&iommu_table_lpar_multi_ops);
table_group->tce32_start = offset;
table_group->tce32_size = size;
}
struct iommu_table_ops iommu_table_pseries_ops = {
.set = tce_build_pSeries,
.clear = tce_free_pSeries,
@ -724,26 +701,71 @@ struct iommu_table_ops iommu_table_lpar_multi_ops = {
* dynamic 64bit DMA window, walking up the device tree.
*/
static struct device_node *pci_dma_find(struct device_node *dn,
const __be32 **dma_window)
struct dynamic_dma_window_prop *prop)
{
const __be32 *dw = NULL;
const __be32 *default_prop = NULL;
const __be32 *ddw_prop = NULL;
struct device_node *rdn = NULL;
bool default_win = false, ddw_win = false;
for ( ; dn && PCI_DN(dn); dn = dn->parent) {
dw = of_get_property(dn, "ibm,dma-window", NULL);
if (dw) {
if (dma_window)
*dma_window = dw;
return dn;
default_prop = of_get_property(dn, "ibm,dma-window", NULL);
if (default_prop) {
rdn = dn;
default_win = true;
}
dw = of_get_property(dn, DIRECT64_PROPNAME, NULL);
if (dw)
return dn;
dw = of_get_property(dn, DMA64_PROPNAME, NULL);
if (dw)
return dn;
ddw_prop = of_get_property(dn, DIRECT64_PROPNAME, NULL);
if (ddw_prop) {
rdn = dn;
ddw_win = true;
break;
}
ddw_prop = of_get_property(dn, DMA64_PROPNAME, NULL);
if (ddw_prop) {
rdn = dn;
ddw_win = true;
break;
}
/* At least found default window, which is the case for normal boot */
if (default_win)
break;
}
return NULL;
/* For PCI devices there will always be a DMA window, either on the device
* or parent bus
*/
WARN_ON(!(default_win | ddw_win));
/* caller doesn't want to get DMA window property */
if (!prop)
return rdn;
/* parse DMA window property. During normal system boot, only default
* DMA window is passed in OF. But, for kdump, a dedicated adapter might
* have both default and DDW in FDT. In this scenario, DDW takes precedence
* over default window.
*/
if (ddw_win) {
struct dynamic_dma_window_prop *p;
p = (struct dynamic_dma_window_prop *)ddw_prop;
prop->liobn = p->liobn;
prop->dma_base = p->dma_base;
prop->tce_shift = p->tce_shift;
prop->window_shift = p->window_shift;
} else if (default_win) {
unsigned long offset, size, liobn;
of_parse_dma_window(rdn, default_prop, &liobn, &offset, &size);
prop->liobn = cpu_to_be32((u32)liobn);
prop->dma_base = cpu_to_be64(offset);
prop->tce_shift = cpu_to_be32(IOMMU_PAGE_SHIFT_4K);
prop->window_shift = cpu_to_be32(order_base_2(size));
}
return rdn;
}
static void pci_dma_bus_setup_pSeriesLP(struct pci_bus *bus)
@ -751,17 +773,20 @@ static void pci_dma_bus_setup_pSeriesLP(struct pci_bus *bus)
struct iommu_table *tbl;
struct device_node *dn, *pdn;
struct pci_dn *ppci;
const __be32 *dma_window = NULL;
struct dynamic_dma_window_prop prop;
dn = pci_bus_to_OF_node(bus);
pr_debug("pci_dma_bus_setup_pSeriesLP: setting up bus %pOF\n",
dn);
pdn = pci_dma_find(dn, &dma_window);
pdn = pci_dma_find(dn, &prop);
if (dma_window == NULL)
pr_debug(" no ibm,dma-window property !\n");
/* In the PPC architecture, there will always be a DMA window on the bus or
* one of the parent buses. During a normal boot, the ibm,dma-window property
* defines the DMA window. For kdump, there will be at least the default
* window or DDW, or both.
*/
ppci = PCI_DN(pdn);
@ -771,13 +796,24 @@ static void pci_dma_bus_setup_pSeriesLP(struct pci_bus *bus)
if (!ppci->table_group) {
ppci->table_group = iommu_pseries_alloc_group(ppci->phb->node);
tbl = ppci->table_group->tables[0];
if (dma_window) {
iommu_table_setparms_lpar(ppci->phb, pdn, tbl,
ppci->table_group, dma_window);
if (!iommu_init_table(tbl, ppci->phb->node, 0, 0))
panic("Failed to initialize iommu table");
}
iommu_table_setparms_common(tbl, ppci->phb->bus->number,
be32_to_cpu(prop.liobn),
be64_to_cpu(prop.dma_base),
1ULL << be32_to_cpu(prop.window_shift),
be32_to_cpu(prop.tce_shift), NULL,
&iommu_table_lpar_multi_ops);
/* Only for normal boot with default window. Doesn't matter even
* if we set these with DDW which is 64bit during kdump, since
* these will not be used during kdump.
*/
ppci->table_group->tce32_start = be64_to_cpu(prop.dma_base);
ppci->table_group->tce32_size = 1 << be32_to_cpu(prop.window_shift);
if (!iommu_init_table(tbl, ppci->phb->node, 0, 0))
panic("Failed to initialize iommu table");
iommu_register_group(ppci->table_group,
pci_domain_nr(bus), 0);
pr_debug(" created table: %p\n", ppci->table_group);
@ -968,6 +1004,12 @@ static void find_existing_ddw_windows_named(const char *name)
continue;
}
/* If at the time of system initialization, there are DDWs in OF,
* it means this is during kexec. DDW could be direct or dynamic.
* We will just mark DDWs as "dynamic" since this is kdump path,
* no need to worry about performance. ddw_list_new_entry() will
* set window->direct = false.
*/
window = ddw_list_new_entry(pdn, dma64);
if (!window) {
of_node_put(pdn);
@ -1524,8 +1566,8 @@ static void pci_dma_dev_setup_pSeriesLP(struct pci_dev *dev)
{
struct device_node *pdn, *dn;
struct iommu_table *tbl;
const __be32 *dma_window = NULL;
struct pci_dn *pci;
struct dynamic_dma_window_prop prop;
pr_debug("pci_dma_dev_setup_pSeriesLP: %s\n", pci_name(dev));
@ -1538,7 +1580,7 @@ static void pci_dma_dev_setup_pSeriesLP(struct pci_dev *dev)
dn = pci_device_to_OF_node(dev);
pr_debug(" node is %pOF\n", dn);
pdn = pci_dma_find(dn, &dma_window);
pdn = pci_dma_find(dn, &prop);
if (!pdn || !PCI_DN(pdn)) {
printk(KERN_WARNING "pci_dma_dev_setup_pSeriesLP: "
"no DMA window found for pci dev=%s dn=%pOF\n",
@ -1551,8 +1593,20 @@ static void pci_dma_dev_setup_pSeriesLP(struct pci_dev *dev)
if (!pci->table_group) {
pci->table_group = iommu_pseries_alloc_group(pci->phb->node);
tbl = pci->table_group->tables[0];
iommu_table_setparms_lpar(pci->phb, pdn, tbl,
pci->table_group, dma_window);
iommu_table_setparms_common(tbl, pci->phb->bus->number,
be32_to_cpu(prop.liobn),
be64_to_cpu(prop.dma_base),
1ULL << be32_to_cpu(prop.window_shift),
be32_to_cpu(prop.tce_shift), NULL,
&iommu_table_lpar_multi_ops);
/* Only for normal boot with default window. Doesn't matter even
* if we set these with DDW which is 64bit during kdump, since
* these will not be used during kdump.
*/
pci->table_group->tce32_start = be64_to_cpu(prop.dma_base);
pci->table_group->tce32_size = 1 << be32_to_cpu(prop.window_shift);
iommu_init_table(tbl, pci->phb->node, 0, 0);
iommu_register_group(pci->table_group,


@ -287,7 +287,6 @@ config AS_HAS_OPTION_ARCH
# https://reviews.llvm.org/D123515
def_bool y
depends on $(as-instr, .option arch$(comma) +m)
depends on !$(as-instr, .option arch$(comma) -i)
source "arch/riscv/Kconfig.socs"
source "arch/riscv/Kconfig.errata"


@ -25,6 +25,11 @@
#define ARCH_SUPPORTS_FTRACE_OPS 1
#ifndef __ASSEMBLY__
extern void *return_address(unsigned int level);
#define ftrace_return_address(n) return_address(n)
void MCOUNT_NAME(void);
static inline unsigned long ftrace_call_adjust(unsigned long addr)
{


@ -11,8 +11,10 @@ static inline void arch_clear_hugepage_flags(struct page *page)
}
#define arch_clear_hugepage_flags arch_clear_hugepage_flags
#ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
bool arch_hugetlb_migration_supported(struct hstate *h);
#define arch_hugetlb_migration_supported arch_hugetlb_migration_supported
#endif
#ifdef CONFIG_RISCV_ISA_SVNAPOT
#define __HAVE_ARCH_HUGE_PTE_CLEAR


@ -84,7 +84,7 @@
* Define vmemmap for pfn_to_page & page_to_pfn calls. Needed if kernel
* is configured with CONFIG_SPARSEMEM_VMEMMAP enabled.
*/
#define vmemmap ((struct page *)VMEMMAP_START)
#define vmemmap ((struct page *)VMEMMAP_START - (phys_ram_base >> PAGE_SHIFT))
#define PCI_IO_SIZE SZ_16M
#define PCI_IO_END VMEMMAP_START
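The new vmemmap definition biases the struct-page array base by the first RAM PFN, so that the generic pfn_to_page(pfn) == vmemmap + pfn places the first page of RAM exactly at VMEMMAP_START rather than phys_ram_base >> PAGE_SHIFT entries past it. A small arithmetic sketch of that effect; the addresses and the 64-byte struct page size are made-up values for illustration.

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT		12
#define STRUCT_PAGE_SIZE	64	/* assumed sizeof(struct page) */

int main(void)
{
	/* Hypothetical values: vmemmap window base and RAM starting at 2 GiB. */
	uint64_t vmemmap_start = 0xffffffc800000000ULL;
	uint64_t phys_ram_base = 0x80000000ULL;
	uint64_t first_pfn = phys_ram_base >> PAGE_SHIFT;	/* 0x80000 */

	/* Old macro: the entry for the first RAM pfn sits far past the base. */
	uint64_t old_entry = vmemmap_start + first_pfn * STRUCT_PAGE_SIZE;
	/* New macro: bias the base down so that same entry lands at the start. */
	uint64_t new_base  = vmemmap_start - first_pfn * STRUCT_PAGE_SIZE;
	uint64_t new_entry = new_base + first_pfn * STRUCT_PAGE_SIZE;

	printf("old: %#llx\nnew: %#llx (== VMEMMAP_START)\n",
	       (unsigned long long)old_entry, (unsigned long long)new_entry);
	return 0;
}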
@ -438,6 +438,10 @@ static inline pte_t pte_mkhuge(pte_t pte)
return pte;
}
#define pte_leaf_size(pte) (pte_napot(pte) ? \
napot_cont_size(napot_cont_order(pte)) :\
PAGE_SIZE)
#ifdef CONFIG_NUMA_BALANCING
/*
* See the comment in include/asm-generic/pgtable.h


@ -19,65 +19,6 @@ static inline bool arch_vmap_pmd_supported(pgprot_t prot)
return true;
}
#ifdef CONFIG_RISCV_ISA_SVNAPOT
#include <linux/pgtable.h>
#endif
#define arch_vmap_pte_range_map_size arch_vmap_pte_range_map_size
static inline unsigned long arch_vmap_pte_range_map_size(unsigned long addr, unsigned long end,
u64 pfn, unsigned int max_page_shift)
{
unsigned long map_size = PAGE_SIZE;
unsigned long size, order;
if (!has_svnapot())
return map_size;
for_each_napot_order_rev(order) {
if (napot_cont_shift(order) > max_page_shift)
continue;
size = napot_cont_size(order);
if (end - addr < size)
continue;
if (!IS_ALIGNED(addr, size))
continue;
if (!IS_ALIGNED(PFN_PHYS(pfn), size))
continue;
map_size = size;
break;
}
return map_size;
}
#define arch_vmap_pte_supported_shift arch_vmap_pte_supported_shift
static inline int arch_vmap_pte_supported_shift(unsigned long size)
{
int shift = PAGE_SHIFT;
unsigned long order;
if (!has_svnapot())
return shift;
WARN_ON_ONCE(size >= PMD_SIZE);
for_each_napot_order_rev(order) {
if (napot_cont_size(order) > size)
continue;
if (!IS_ALIGNED(size, napot_cont_size(order)))
continue;
shift = napot_cont_shift(order);
break;
}
return shift;
}
#endif /* CONFIG_RISCV_ISA_SVNAPOT */
#endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */
#endif /* _ASM_RISCV_VMALLOC_H */


@ -7,6 +7,7 @@ ifdef CONFIG_FTRACE
CFLAGS_REMOVE_ftrace.o = $(CC_FLAGS_FTRACE)
CFLAGS_REMOVE_patch.o = $(CC_FLAGS_FTRACE)
CFLAGS_REMOVE_sbi.o = $(CC_FLAGS_FTRACE)
CFLAGS_REMOVE_return_address.o = $(CC_FLAGS_FTRACE)
endif
CFLAGS_syscall_table.o += $(call cc-option,-Wno-override-init,)
CFLAGS_compat_syscall_table.o += $(call cc-option,-Wno-override-init,)
@ -46,6 +47,7 @@ obj-y += irq.o
obj-y += process.o
obj-y += ptrace.o
obj-y += reset.o
obj-y += return_address.o
obj-y += setup.o
obj-y += signal.o
obj-y += syscall_table.o


@ -21,6 +21,7 @@
#include <asm/hwprobe.h>
#include <asm/patch.h>
#include <asm/processor.h>
#include <asm/sbi.h>
#include <asm/vector.h>
#include "copy-unaligned.h"
@ -396,6 +397,20 @@ static void __init riscv_fill_hwcap_from_isa_string(unsigned long *isa2hwcap)
set_bit(RISCV_ISA_EXT_ZIHPM, isainfo->isa);
}
/*
* "V" in ISA strings is ambiguous in practice: it should mean
* just the standard V-1.0 but vendors aren't well behaved.
* Many vendors with T-Head CPU cores which implement the 0.7.1
* version of the vector specification put "v" into their DTs.
* CPU cores with the ratified spec will contain non-zero
* marchid.
*/
if (acpi_disabled && riscv_cached_mvendorid(cpu) == THEAD_VENDOR_ID &&
riscv_cached_marchid(cpu) == 0x0) {
this_hwcap &= ~isa2hwcap[RISCV_ISA_EXT_v];
clear_bit(RISCV_ISA_EXT_v, isainfo->isa);
}
/*
* All "okay" hart should have same isa. Set HWCAP based on
* common capabilities of every "okay" hart, in case they don't


@ -0,0 +1,48 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* This code come from arch/arm64/kernel/return_address.c
*
* Copyright (C) 2023 SiFive.
*/
#include <linux/export.h>
#include <linux/kprobes.h>
#include <linux/stacktrace.h>
struct return_address_data {
unsigned int level;
void *addr;
};
static bool save_return_addr(void *d, unsigned long pc)
{
struct return_address_data *data = d;
if (!data->level) {
data->addr = (void *)pc;
return false;
}
--data->level;
return true;
}
NOKPROBE_SYMBOL(save_return_addr);
noinline void *return_address(unsigned int level)
{
struct return_address_data data;
data.level = level + 3;
data.addr = NULL;
arch_stack_walk(save_return_addr, &data, current, NULL);
if (!data.level)
return data.addr;
else
return NULL;
}
EXPORT_SYMBOL_GPL(return_address);
NOKPROBE_SYMBOL(return_address);
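The new file resolves a return address by walking the stack with arch_stack_walk() and skipping level + 3 frames so the walker's own frames don't count; the ftrace.h hunk earlier wires ftrace_return_address(n) to it. Below is a rough user-space analogue of the same skip-N-frames idea, using glibc's backtrace(); that environment is an assumption of the sketch, not part of this series.

#include <execinfo.h>
#include <stdio.h>

/*
 * Rough analogue of return_address(): level 0 yields the address the
 * immediate caller will return to, level 1 one frame further up, etc.
 */
static void *return_address_sketch(unsigned int level)
{
	void *frames[32];
	int n = backtrace(frames, 32);

	/*
	 * frames[0] is inside this helper and frames[1] inside its caller,
	 * so skip two entries; the same idea as the "level + 3" skip in the
	 * kernel version above.
	 */
	return ((int)level + 2 < n) ? frames[level + 2] : NULL;
}

static void callee(void)
{
	/* Should print an address inside main(). */
	printf("callee() returns into %p\n", return_address_sketch(0));
}

int main(void)
{
	callee();
	return 0;
}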


@ -426,10 +426,12 @@ bool __init arch_hugetlb_valid_size(unsigned long size)
return __hugetlb_valid_size(size);
}
#ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
bool arch_hugetlb_migration_supported(struct hstate *h)
{
return __hugetlb_valid_size(huge_page_size(h));
}
#endif
#ifdef CONFIG_CONTIG_ALLOC
static __init int gigantic_pages_init(void)


@ -885,6 +885,7 @@ SYM_FUNC_START(entry_SYSENTER_32)
BUG_IF_WRONG_CR3 no_user_check=1
popfl
popl %eax
CLEAR_CPU_BUFFERS
/*
* Return back to the vDSO, which will pop ecx and edx.
@ -954,6 +955,7 @@ restore_all_switch_stack:
/* Restore user state */
RESTORE_REGS pop=4 # skip orig_eax/error_code
CLEAR_CPU_BUFFERS
.Lirq_return:
/*
* ARCH_HAS_MEMBARRIER_SYNC_CORE rely on IRET core serialization
@ -1146,6 +1148,7 @@ SYM_CODE_START(asm_exc_nmi)
/* Not on SYSENTER stack. */
call exc_nmi
CLEAR_CPU_BUFFERS
jmp .Lnmi_return
.Lnmi_from_sysenter_stack:


@ -223,6 +223,7 @@ syscall_return_via_sysret:
SYM_INNER_LABEL(entry_SYSRETQ_unsafe_stack, SYM_L_GLOBAL)
ANNOTATE_NOENDBR
swapgs
CLEAR_CPU_BUFFERS
sysretq
SYM_INNER_LABEL(entry_SYSRETQ_end, SYM_L_GLOBAL)
ANNOTATE_NOENDBR
@ -663,6 +664,7 @@ SYM_INNER_LABEL(swapgs_restore_regs_and_return_to_usermode, SYM_L_GLOBAL)
/* Restore RDI. */
popq %rdi
swapgs
CLEAR_CPU_BUFFERS
jmp .Lnative_iret
@ -774,6 +776,8 @@ native_irq_return_ldt:
*/
popq %rax /* Restore user RAX */
CLEAR_CPU_BUFFERS
/*
* RSP now points to an ordinary IRET frame, except that the page
* is read-only and RSP[31:16] are preloaded with the userspace
@ -1502,6 +1506,12 @@ nmi_restore:
std
movq $0, 5*8(%rsp) /* clear "NMI executing" */
/*
* Skip CLEAR_CPU_BUFFERS here, since it only helps in rare cases like
* NMI in kernel after user state is restored. For an unprivileged user
* these conditions are hard to meet.
*/
/*
* iretq reads the "iret" frame and exits the NMI stack in a
* single instruction. We are returning to kernel mode, so this
@ -1520,6 +1530,7 @@ SYM_CODE_START(ignore_sysret)
UNWIND_HINT_END_OF_STACK
ENDBR
mov $-ENOSYS, %eax
CLEAR_CPU_BUFFERS
sysretl
SYM_CODE_END(ignore_sysret)
#endif


@ -271,6 +271,7 @@ SYM_INNER_LABEL(entry_SYSRETL_compat_unsafe_stack, SYM_L_GLOBAL)
xorl %r9d, %r9d
xorl %r10d, %r10d
swapgs
CLEAR_CPU_BUFFERS
sysretl
SYM_INNER_LABEL(entry_SYSRETL_compat_end, SYM_L_GLOBAL)
ANNOTATE_NOENDBR


@ -91,7 +91,6 @@ static inline void arch_exit_to_user_mode_prepare(struct pt_regs *regs,
static __always_inline void arch_exit_to_user_mode(void)
{
mds_user_clear_cpu_buffers();
amd_clear_divider();
}
#define arch_exit_to_user_mode arch_exit_to_user_mode


@ -549,7 +549,6 @@ DECLARE_STATIC_KEY_FALSE(switch_to_cond_stibp);
DECLARE_STATIC_KEY_FALSE(switch_mm_cond_ibpb);
DECLARE_STATIC_KEY_FALSE(switch_mm_always_ibpb);
DECLARE_STATIC_KEY_FALSE(mds_user_clear);
DECLARE_STATIC_KEY_FALSE(mds_idle_clear);
DECLARE_STATIC_KEY_FALSE(switch_mm_cond_l1d_flush);
@ -583,17 +582,6 @@ static __always_inline void mds_clear_cpu_buffers(void)
asm volatile("verw %[ds]" : : [ds] "m" (ds) : "cc");
}
/**
* mds_user_clear_cpu_buffers - Mitigation for MDS and TAA vulnerability
*
* Clear CPU buffers if the corresponding static key is enabled
*/
static __always_inline void mds_user_clear_cpu_buffers(void)
{
if (static_branch_likely(&mds_user_clear))
mds_clear_cpu_buffers();
}
/**
* mds_idle_clear_cpu_buffers - Mitigation for MDS vulnerability
*


@ -111,9 +111,6 @@ DEFINE_STATIC_KEY_FALSE(switch_mm_cond_ibpb);
/* Control unconditional IBPB in switch_mm() */
DEFINE_STATIC_KEY_FALSE(switch_mm_always_ibpb);
/* Control MDS CPU buffer clear before returning to user space */
DEFINE_STATIC_KEY_FALSE(mds_user_clear);
EXPORT_SYMBOL_GPL(mds_user_clear);
/* Control MDS CPU buffer clear before idling (halt, mwait) */
DEFINE_STATIC_KEY_FALSE(mds_idle_clear);
EXPORT_SYMBOL_GPL(mds_idle_clear);
@ -252,7 +249,7 @@ static void __init mds_select_mitigation(void)
if (!boot_cpu_has(X86_FEATURE_MD_CLEAR))
mds_mitigation = MDS_MITIGATION_VMWERV;
static_branch_enable(&mds_user_clear);
setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
if (!boot_cpu_has(X86_BUG_MSBDS_ONLY) &&
(mds_nosmt || cpu_mitigations_auto_nosmt()))
@ -356,7 +353,7 @@ static void __init taa_select_mitigation(void)
* For guests that can't determine whether the correct microcode is
* present on host, enable the mitigation for UCODE_NEEDED as well.
*/
static_branch_enable(&mds_user_clear);
setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
if (taa_nosmt || cpu_mitigations_auto_nosmt())
cpu_smt_disable(false);
@ -424,7 +421,7 @@ static void __init mmio_select_mitigation(void)
*/
if (boot_cpu_has_bug(X86_BUG_MDS) || (boot_cpu_has_bug(X86_BUG_TAA) &&
boot_cpu_has(X86_FEATURE_RTM)))
static_branch_enable(&mds_user_clear);
setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
else
static_branch_enable(&mmio_stale_data_clear);
@ -484,12 +481,12 @@ static void __init md_clear_update_mitigation(void)
if (cpu_mitigations_off())
return;
if (!static_key_enabled(&mds_user_clear))
if (!boot_cpu_has(X86_FEATURE_CLEAR_CPU_BUF))
goto out;
/*
* mds_user_clear is now enabled. Update MDS, TAA and MMIO Stale Data
* mitigation, if necessary.
* X86_FEATURE_CLEAR_CPU_BUF is now enabled. Update MDS, TAA and MMIO
* Stale Data mitigation, if necessary.
*/
if (mds_mitigation == MDS_MITIGATION_OFF &&
boot_cpu_has_bug(X86_BUG_MDS)) {


@ -184,6 +184,90 @@ static bool bad_spectre_microcode(struct cpuinfo_x86 *c)
return false;
}
#define MSR_IA32_TME_ACTIVATE 0x982
/* Helpers to access TME_ACTIVATE MSR */
#define TME_ACTIVATE_LOCKED(x) (x & 0x1)
#define TME_ACTIVATE_ENABLED(x) (x & 0x2)
#define TME_ACTIVATE_POLICY(x) ((x >> 4) & 0xf) /* Bits 7:4 */
#define TME_ACTIVATE_POLICY_AES_XTS_128 0
#define TME_ACTIVATE_KEYID_BITS(x) ((x >> 32) & 0xf) /* Bits 35:32 */
#define TME_ACTIVATE_CRYPTO_ALGS(x) ((x >> 48) & 0xffff) /* Bits 63:48 */
#define TME_ACTIVATE_CRYPTO_AES_XTS_128 1
/* Values for mktme_status (SW only construct) */
#define MKTME_ENABLED 0
#define MKTME_DISABLED 1
#define MKTME_UNINITIALIZED 2
static int mktme_status = MKTME_UNINITIALIZED;
static void detect_tme_early(struct cpuinfo_x86 *c)
{
u64 tme_activate, tme_policy, tme_crypto_algs;
int keyid_bits = 0, nr_keyids = 0;
static u64 tme_activate_cpu0 = 0;
rdmsrl(MSR_IA32_TME_ACTIVATE, tme_activate);
if (mktme_status != MKTME_UNINITIALIZED) {
if (tme_activate != tme_activate_cpu0) {
/* Broken BIOS? */
pr_err_once("x86/tme: configuration is inconsistent between CPUs\n");
pr_err_once("x86/tme: MKTME is not usable\n");
mktme_status = MKTME_DISABLED;
/* Proceed. We may need to exclude bits from x86_phys_bits. */
}
} else {
tme_activate_cpu0 = tme_activate;
}
if (!TME_ACTIVATE_LOCKED(tme_activate) || !TME_ACTIVATE_ENABLED(tme_activate)) {
pr_info_once("x86/tme: not enabled by BIOS\n");
mktme_status = MKTME_DISABLED;
return;
}
if (mktme_status != MKTME_UNINITIALIZED)
goto detect_keyid_bits;
pr_info("x86/tme: enabled by BIOS\n");
tme_policy = TME_ACTIVATE_POLICY(tme_activate);
if (tme_policy != TME_ACTIVATE_POLICY_AES_XTS_128)
pr_warn("x86/tme: Unknown policy is active: %#llx\n", tme_policy);
tme_crypto_algs = TME_ACTIVATE_CRYPTO_ALGS(tme_activate);
if (!(tme_crypto_algs & TME_ACTIVATE_CRYPTO_AES_XTS_128)) {
pr_err("x86/mktme: No known encryption algorithm is supported: %#llx\n",
tme_crypto_algs);
mktme_status = MKTME_DISABLED;
}
detect_keyid_bits:
keyid_bits = TME_ACTIVATE_KEYID_BITS(tme_activate);
nr_keyids = (1UL << keyid_bits) - 1;
if (nr_keyids) {
pr_info_once("x86/mktme: enabled by BIOS\n");
pr_info_once("x86/mktme: %d KeyIDs available\n", nr_keyids);
} else {
pr_info_once("x86/mktme: disabled by BIOS\n");
}
if (mktme_status == MKTME_UNINITIALIZED) {
/* MKTME is usable */
mktme_status = MKTME_ENABLED;
}
/*
* KeyID bits effectively lower the number of physical address
* bits. Update cpuinfo_x86::x86_phys_bits accordingly.
*/
c->x86_phys_bits -= keyid_bits;
}
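detect_tme_early() is the old detect_tme() moved up so the KeyID-bit adjustment of x86_phys_bits happens before the MTRR mask bits are computed (see the early_init_intel() hunk just below). The bit-field helpers simply decode MSR_IA32_TME_ACTIVATE; here is a small decode of a purely hypothetical MSR value reusing the same macros. With 6 KeyID bits, 63 KeyIDs are available and x86_phys_bits shrinks by 6.

#include <stdio.h>
#include <stdint.h>

/* Same layout helpers as the hunk above (MSR_IA32_TME_ACTIVATE fields). */
#define TME_ACTIVATE_LOCKED(x)		((x) & 0x1)
#define TME_ACTIVATE_ENABLED(x)		((x) & 0x2)
#define TME_ACTIVATE_POLICY(x)		(((x) >> 4) & 0xf)	/* Bits 7:4 */
#define TME_ACTIVATE_KEYID_BITS(x)	(((x) >> 32) & 0xf)	/* Bits 35:32 */
#define TME_ACTIVATE_CRYPTO_ALGS(x)	(((x) >> 48) & 0xffff)	/* Bits 63:48 */

int main(void)
{
	/* Hypothetical value: locked + enabled, AES-XTS-128, 6 KeyID bits. */
	uint64_t tme_activate = 0x0001000600000003ULL;
	int keyid_bits = (int)TME_ACTIVATE_KEYID_BITS(tme_activate);

	printf("locked=%d enabled=%d policy=%d keyid_bits=%d nr_keyids=%lu algs=%#x\n",
	       (int)TME_ACTIVATE_LOCKED(tme_activate),
	       !!TME_ACTIVATE_ENABLED(tme_activate),
	       (int)TME_ACTIVATE_POLICY(tme_activate),
	       keyid_bits, (1UL << keyid_bits) - 1,
	       (unsigned)TME_ACTIVATE_CRYPTO_ALGS(tme_activate));
	return 0;
}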
static void early_init_intel(struct cpuinfo_x86 *c)
{
u64 misc_enable;
@ -335,6 +419,13 @@ static void early_init_intel(struct cpuinfo_x86 *c)
*/
if (detect_extended_topology_early(c) < 0)
detect_ht_early(c);
/*
* Adjust the number of physical bits early because it affects the
* valid bits of the MTRR mask registers.
*/
if (cpu_has(c, X86_FEATURE_TME))
detect_tme_early(c);
}
static void bsp_init_intel(struct cpuinfo_x86 *c)
@ -495,90 +586,6 @@ static void srat_detect_node(struct cpuinfo_x86 *c)
#endif
}
#define MSR_IA32_TME_ACTIVATE 0x982
/* Helpers to access TME_ACTIVATE MSR */
#define TME_ACTIVATE_LOCKED(x) (x & 0x1)
#define TME_ACTIVATE_ENABLED(x) (x & 0x2)
#define TME_ACTIVATE_POLICY(x) ((x >> 4) & 0xf) /* Bits 7:4 */
#define TME_ACTIVATE_POLICY_AES_XTS_128 0
#define TME_ACTIVATE_KEYID_BITS(x) ((x >> 32) & 0xf) /* Bits 35:32 */
#define TME_ACTIVATE_CRYPTO_ALGS(x) ((x >> 48) & 0xffff) /* Bits 63:48 */
#define TME_ACTIVATE_CRYPTO_AES_XTS_128 1
/* Values for mktme_status (SW only construct) */
#define MKTME_ENABLED 0
#define MKTME_DISABLED 1
#define MKTME_UNINITIALIZED 2
static int mktme_status = MKTME_UNINITIALIZED;
static void detect_tme(struct cpuinfo_x86 *c)
{
u64 tme_activate, tme_policy, tme_crypto_algs;
int keyid_bits = 0, nr_keyids = 0;
static u64 tme_activate_cpu0 = 0;
rdmsrl(MSR_IA32_TME_ACTIVATE, tme_activate);
if (mktme_status != MKTME_UNINITIALIZED) {
if (tme_activate != tme_activate_cpu0) {
/* Broken BIOS? */
pr_err_once("x86/tme: configuration is inconsistent between CPUs\n");
pr_err_once("x86/tme: MKTME is not usable\n");
mktme_status = MKTME_DISABLED;
/* Proceed. We may need to exclude bits from x86_phys_bits. */
}
} else {
tme_activate_cpu0 = tme_activate;
}
if (!TME_ACTIVATE_LOCKED(tme_activate) || !TME_ACTIVATE_ENABLED(tme_activate)) {
pr_info_once("x86/tme: not enabled by BIOS\n");
mktme_status = MKTME_DISABLED;
return;
}
if (mktme_status != MKTME_UNINITIALIZED)
goto detect_keyid_bits;
pr_info("x86/tme: enabled by BIOS\n");
tme_policy = TME_ACTIVATE_POLICY(tme_activate);
if (tme_policy != TME_ACTIVATE_POLICY_AES_XTS_128)
pr_warn("x86/tme: Unknown policy is active: %#llx\n", tme_policy);
tme_crypto_algs = TME_ACTIVATE_CRYPTO_ALGS(tme_activate);
if (!(tme_crypto_algs & TME_ACTIVATE_CRYPTO_AES_XTS_128)) {
pr_err("x86/mktme: No known encryption algorithm is supported: %#llx\n",
tme_crypto_algs);
mktme_status = MKTME_DISABLED;
}
detect_keyid_bits:
keyid_bits = TME_ACTIVATE_KEYID_BITS(tme_activate);
nr_keyids = (1UL << keyid_bits) - 1;
if (nr_keyids) {
pr_info_once("x86/mktme: enabled by BIOS\n");
pr_info_once("x86/mktme: %d KeyIDs available\n", nr_keyids);
} else {
pr_info_once("x86/mktme: disabled by BIOS\n");
}
if (mktme_status == MKTME_UNINITIALIZED) {
/* MKTME is usable */
mktme_status = MKTME_ENABLED;
}
/*
* KeyID bits effectively lower the number of physical address
* bits. Update cpuinfo_x86::x86_phys_bits accordingly.
*/
c->x86_phys_bits -= keyid_bits;
}
static void init_cpuid_fault(struct cpuinfo_x86 *c)
{
u64 msr;
@ -715,9 +722,6 @@ static void init_intel(struct cpuinfo_x86 *c)
init_ia32_feat_ctl(c);
if (cpu_has(c, X86_FEATURE_TME))
detect_tme(c);
init_intel_misc_features(c);
split_lock_init();


@ -1017,10 +1017,12 @@ void __init e820__reserve_setup_data(void)
e820__range_update(pa_data, sizeof(*data)+data->len, E820_TYPE_RAM, E820_TYPE_RESERVED_KERN);
/*
* SETUP_EFI and SETUP_IMA are supplied by kexec and do not need
* to be reserved.
* SETUP_EFI, SETUP_IMA and SETUP_RNG_SEED are supplied by
* kexec and do not need to be reserved.
*/
if (data->type != SETUP_EFI && data->type != SETUP_IMA)
if (data->type != SETUP_EFI &&
data->type != SETUP_IMA &&
data->type != SETUP_RNG_SEED)
e820__range_update_kexec(pa_data,
sizeof(*data) + data->len,
E820_TYPE_RAM, E820_TYPE_RESERVED_KERN);


@ -556,9 +556,6 @@ nmi_restart:
}
if (this_cpu_dec_return(nmi_state))
goto nmi_restart;
if (user_mode(regs))
mds_user_clear_cpu_buffers();
}
#if IS_ENABLED(CONFIG_KVM_INTEL)


@ -2,7 +2,10 @@
#ifndef __KVM_X86_VMX_RUN_FLAGS_H
#define __KVM_X86_VMX_RUN_FLAGS_H
#define VMX_RUN_VMRESUME (1 << 0)
#define VMX_RUN_SAVE_SPEC_CTRL (1 << 1)
#define VMX_RUN_VMRESUME_SHIFT 0
#define VMX_RUN_SAVE_SPEC_CTRL_SHIFT 1
#define VMX_RUN_VMRESUME BIT(VMX_RUN_VMRESUME_SHIFT)
#define VMX_RUN_SAVE_SPEC_CTRL BIT(VMX_RUN_SAVE_SPEC_CTRL_SHIFT)
#endif /* __KVM_X86_VMX_RUN_FLAGS_H */
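The flags are now derived from explicit *_SHIFT values because vmenter.S (next hunk) switches from test, which reports through ZF, to bt, which reports through CF; CLEAR_CPU_BUFFERS clobbers ZF, so the VMRESUME-vs-VMLAUNCH decision has to survive in CF. A tiny sketch of the equivalence; BIT() here is a user-space stand-in for the kernel macro.

#include <stdio.h>

#define BIT(nr)				(1U << (nr))	/* stand-in for the kernel's BIT() */
#define VMX_RUN_VMRESUME_SHIFT		0
#define VMX_RUN_SAVE_SPEC_CTRL_SHIFT	1
#define VMX_RUN_VMRESUME		BIT(VMX_RUN_VMRESUME_SHIFT)
#define VMX_RUN_SAVE_SPEC_CTRL		BIT(VMX_RUN_SAVE_SPEC_CTRL_SHIFT)

int main(void)
{
	unsigned int flags = VMX_RUN_VMRESUME;

	/* What `bt $VMX_RUN_VMRESUME_SHIFT, %ebx` leaves in CF: */
	int vmresume = (flags >> VMX_RUN_VMRESUME_SHIFT) & 1;

	printf("vmresume=%d save_spec_ctrl=%d\n",
	       vmresume, !!(flags & VMX_RUN_SAVE_SPEC_CTRL));
	return 0;
}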


@ -139,7 +139,7 @@ SYM_FUNC_START(__vmx_vcpu_run)
mov (%_ASM_SP), %_ASM_AX
/* Check if vmlaunch or vmresume is needed */
test $VMX_RUN_VMRESUME, %ebx
bt $VMX_RUN_VMRESUME_SHIFT, %ebx
/* Load guest registers. Don't clobber flags. */
mov VCPU_RCX(%_ASM_AX), %_ASM_CX
@ -161,8 +161,11 @@ SYM_FUNC_START(__vmx_vcpu_run)
/* Load guest RAX. This kills the @regs pointer! */
mov VCPU_RAX(%_ASM_AX), %_ASM_AX
/* Check EFLAGS.ZF from 'test VMX_RUN_VMRESUME' above */
jz .Lvmlaunch
/* Clobbers EFLAGS.ZF */
CLEAR_CPU_BUFFERS
/* Check EFLAGS.CF from the VMX_RUN_VMRESUME bit test above. */
jnc .Lvmlaunch
/*
* After a successful VMRESUME/VMLAUNCH, control flow "magically"


@ -387,7 +387,16 @@ static __always_inline void vmx_enable_fb_clear(struct vcpu_vmx *vmx)
static void vmx_update_fb_clear_dis(struct kvm_vcpu *vcpu, struct vcpu_vmx *vmx)
{
vmx->disable_fb_clear = (host_arch_capabilities & ARCH_CAP_FB_CLEAR_CTRL) &&
/*
* Disable VERW's behavior of clearing CPU buffers for the guest if the
* CPU isn't affected by MDS/TAA, and the host hasn't forcefully enabled
* the mitigation. Disabling the clearing behavior provides a
* performance boost for guests that aren't aware that manually clearing
* CPU buffers is unnecessary, at the cost of MSR accesses on VM-Entry
* and VM-Exit.
*/
vmx->disable_fb_clear = !cpu_feature_enabled(X86_FEATURE_CLEAR_CPU_BUF) &&
(host_arch_capabilities & ARCH_CAP_FB_CLEAR_CTRL) &&
!boot_cpu_has_bug(X86_BUG_MDS) &&
!boot_cpu_has_bug(X86_BUG_TAA);
@ -7226,11 +7235,14 @@ static noinstr void vmx_vcpu_enter_exit(struct kvm_vcpu *vcpu,
guest_state_enter_irqoff();
/* L1D Flush includes CPU buffer clear to mitigate MDS */
/*
* L1D Flush includes CPU buffer clear to mitigate MDS, but VERW
* mitigation for MDS is done late in VMentry and is still
* executed in spite of L1D Flush. This is because an extra VERW
* should not matter much after the big hammer L1D Flush.
*/
if (static_branch_unlikely(&vmx_l1d_should_flush))
vmx_l1d_flush(vcpu);
else if (static_branch_unlikely(&mds_user_clear))
mds_clear_cpu_buffers();
else if (static_branch_unlikely(&mmio_stale_data_clear) &&
kvm_arch_has_assigned_device(vcpu->kvm))
mds_clear_cpu_buffers();


@ -115,6 +115,9 @@ struct ublk_uring_cmd_pdu {
*/
#define UBLK_IO_FLAG_NEED_GET_DATA 0x08
/* atomic RW with ubq->cancel_lock */
#define UBLK_IO_FLAG_CANCELED 0x80000000
struct ublk_io {
/* userspace buffer address from io cmd */
__u64 addr;
@ -139,6 +142,7 @@ struct ublk_queue {
bool force_abort;
bool timeout;
unsigned short nr_io_ready; /* how many ios setup */
spinlock_t cancel_lock;
struct ublk_device *dev;
struct ublk_io ios[];
};
@ -1477,28 +1481,28 @@ static inline bool ublk_queue_ready(struct ublk_queue *ubq)
return ubq->nr_io_ready == ubq->q_depth;
}
static void ublk_cmd_cancel_cb(struct io_uring_cmd *cmd, unsigned issue_flags)
{
io_uring_cmd_done(cmd, UBLK_IO_RES_ABORT, 0, issue_flags);
}
static void ublk_cancel_queue(struct ublk_queue *ubq)
{
int i;
if (!ublk_queue_ready(ubq))
return;
for (i = 0; i < ubq->q_depth; i++) {
struct ublk_io *io = &ubq->ios[i];
if (io->flags & UBLK_IO_FLAG_ACTIVE)
io_uring_cmd_complete_in_task(io->cmd,
ublk_cmd_cancel_cb);
}
if (io->flags & UBLK_IO_FLAG_ACTIVE) {
bool done;
/* all io commands are canceled */
ubq->nr_io_ready = 0;
spin_lock(&ubq->cancel_lock);
done = !!(io->flags & UBLK_IO_FLAG_CANCELED);
if (!done)
io->flags |= UBLK_IO_FLAG_CANCELED;
spin_unlock(&ubq->cancel_lock);
if (!done)
io_uring_cmd_done(io->cmd,
UBLK_IO_RES_ABORT, 0,
IO_URING_F_UNLOCKED);
}
}
}
/* Cancel all pending commands, must be called after del_gendisk() returns */
@ -1545,7 +1549,6 @@ static void __ublk_quiesce_dev(struct ublk_device *ub)
blk_mq_quiesce_queue(ub->ub_disk->queue);
ublk_wait_tagset_rqs_idle(ub);
ub->dev_info.state = UBLK_S_DEV_QUIESCED;
ublk_cancel_dev(ub);
/* we are going to release task_struct of ubq_daemon and resets
* ->ubq_daemon to NULL. So in monitor_work, check on ubq_daemon causes UAF.
* Besides, monitor_work is not necessary in QUIESCED state since we have
@ -1568,6 +1571,7 @@ static void ublk_quiesce_work_fn(struct work_struct *work)
__ublk_quiesce_dev(ub);
unlock:
mutex_unlock(&ub->mutex);
ublk_cancel_dev(ub);
}
static void ublk_unquiesce_dev(struct ublk_device *ub)
@ -1607,8 +1611,8 @@ static void ublk_stop_dev(struct ublk_device *ub)
put_disk(ub->ub_disk);
ub->ub_disk = NULL;
unlock:
ublk_cancel_dev(ub);
mutex_unlock(&ub->mutex);
ublk_cancel_dev(ub);
cancel_delayed_work_sync(&ub->monitor_work);
}
@ -1962,6 +1966,7 @@ static int ublk_init_queue(struct ublk_device *ub, int q_id)
void *ptr;
int size;
spin_lock_init(&ubq->cancel_lock);
ubq->flags = ub->dev_info.flags;
ubq->q_id = q_id;
ubq->q_depth = ub->dev_info.queue_depth;
@ -2569,8 +2574,9 @@ static void ublk_queue_reinit(struct ublk_device *ub, struct ublk_queue *ubq)
int i;
WARN_ON_ONCE(!(ubq->ubq_daemon && ubq_daemon_is_dying(ubq)));
/* All old ioucmds have to be completed */
WARN_ON_ONCE(ubq->nr_io_ready);
ubq->nr_io_ready = 0;
/* old daemon is PF_EXITING, put it now */
put_task_struct(ubq->ubq_daemon);
/* We have to reset it to NULL, otherwise ub won't accept new FETCH_REQ */


@ -152,7 +152,7 @@ static int qca_send_patch_config_cmd(struct hci_dev *hdev)
bt_dev_dbg(hdev, "QCA Patch config");
skb = __hci_cmd_sync_ev(hdev, EDL_PATCH_CMD_OPCODE, sizeof(cmd),
cmd, HCI_EV_VENDOR, HCI_INIT_TIMEOUT);
cmd, 0, HCI_INIT_TIMEOUT);
if (IS_ERR(skb)) {
err = PTR_ERR(skb);
bt_dev_err(hdev, "Sending QCA Patch config failed (%d)", err);


@ -1417,7 +1417,7 @@ static int bcm4377_check_bdaddr(struct bcm4377_data *bcm4377)
bda = (struct hci_rp_read_bd_addr *)skb->data;
if (!bcm4377_is_valid_bdaddr(bcm4377, &bda->bdaddr))
set_bit(HCI_QUIRK_INVALID_BDADDR, &bcm4377->hdev->quirks);
set_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &bcm4377->hdev->quirks);
kfree_skb(skb);
return 0;
@ -2368,7 +2368,6 @@ static int bcm4377_probe(struct pci_dev *pdev, const struct pci_device_id *id)
hdev->set_bdaddr = bcm4377_hci_set_bdaddr;
hdev->setup = bcm4377_hci_setup;
set_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks);
if (bcm4377->hw->broken_mws_transport_config)
set_bit(HCI_QUIRK_BROKEN_MWS_TRANSPORT_CONFIG, &hdev->quirks);
if (bcm4377->hw->broken_ext_scan)


@ -7,6 +7,7 @@
*
* Copyright (C) 2007 Texas Instruments, Inc.
* Copyright (c) 2010, 2012, 2018 The Linux Foundation. All rights reserved.
* Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
*
* Acknowledgements:
* This file is based on hci_ll.c, which was...
@ -1806,13 +1807,12 @@ static int qca_power_on(struct hci_dev *hdev)
static void hci_coredump_qca(struct hci_dev *hdev)
{
int err;
static const u8 param[] = { 0x26 };
struct sk_buff *skb;
skb = __hci_cmd_sync(hdev, 0xfc0c, 1, param, HCI_CMD_TIMEOUT);
if (IS_ERR(skb))
bt_dev_err(hdev, "%s: trigger crash failed (%ld)", __func__, PTR_ERR(skb));
kfree_skb(skb);
err = __hci_cmd_send(hdev, 0xfc0c, 1, param);
if (err < 0)
bt_dev_err(hdev, "%s: trigger crash failed (%d)", __func__, err);
}
static int qca_setup(struct hci_uart *hu)
@ -1882,7 +1882,17 @@ retry:
case QCA_WCN6750:
case QCA_WCN6855:
case QCA_WCN7850:
set_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks);
/* Set BDA quirk bit for reading BDA value from fwnode property
* only if that property exists in DT.
*/
if (fwnode_property_present(dev_fwnode(hdev->dev.parent), "local-bd-address")) {
set_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks);
bt_dev_info(hdev, "setting quirk bit to read BDA from fwnode later");
} else {
bt_dev_dbg(hdev, "local-bd-address` is not present in the devicetree so not setting quirk bit for BDA");
}
hci_set_aosp_capable(hdev);
ret = qca_read_soc_version(hdev, &ver, soc_type);


@ -2987,6 +2987,9 @@ static void intel_cpufreq_adjust_perf(unsigned int cpunum,
if (min_pstate < cpu->min_perf_ratio)
min_pstate = cpu->min_perf_ratio;
if (min_pstate > cpu->max_perf_ratio)
min_pstate = cpu->max_perf_ratio;
max_pstate = min(cap_pstate, cpu->max_perf_ratio);
if (max_pstate < min_pstate)
max_pstate = min_pstate;


@ -346,6 +346,20 @@ static void dw_edma_v0_core_write_chunk(struct dw_edma_chunk *chunk)
dw_edma_v0_write_ll_link(chunk, i, control, chunk->ll_region.paddr);
}
static void dw_edma_v0_sync_ll_data(struct dw_edma_chunk *chunk)
{
/*
* In case of remote eDMA engine setup, the DW PCIe RP/EP internal
* configuration registers and application memory are normally accessed
* over different buses. Ensure LL-data reaches the memory before the
* doorbell register is toggled by issuing the dummy-read from the remote
* LL memory in the hope that the MRd TLP will return only after the
* last MWr TLP is completed
*/
if (!(chunk->chan->dw->chip->flags & DW_EDMA_CHIP_LOCAL))
readl(chunk->ll_region.vaddr.io);
}
static void dw_edma_v0_core_start(struct dw_edma_chunk *chunk, bool first)
{
struct dw_edma_chan *chan = chunk->chan;
@ -412,6 +426,9 @@ static void dw_edma_v0_core_start(struct dw_edma_chunk *chunk, bool first)
SET_CH_32(dw, chan->dir, chan->id, llp.msb,
upper_32_bits(chunk->ll_region.paddr));
}
dw_edma_v0_sync_ll_data(chunk);
/* Doorbell */
SET_RW_32(dw, chan->dir, doorbell,
FIELD_PREP(EDMA_V0_DOORBELL_CH_MASK, chan->id));


@ -65,18 +65,12 @@ static void dw_hdma_v0_core_off(struct dw_edma *dw)
static u16 dw_hdma_v0_core_ch_count(struct dw_edma *dw, enum dw_edma_dir dir)
{
u32 num_ch = 0;
int id;
for (id = 0; id < HDMA_V0_MAX_NR_CH; id++) {
if (GET_CH_32(dw, id, dir, ch_en) & BIT(0))
num_ch++;
}
if (num_ch > HDMA_V0_MAX_NR_CH)
num_ch = HDMA_V0_MAX_NR_CH;
return (u16)num_ch;
/*
* The HDMA IP has no way to know the number of hardware channels
* available, so we set it to the maximum and let the platform
* set the right number of channels.
*/
return HDMA_V0_MAX_NR_CH;
}
static enum dma_status dw_hdma_v0_core_ch_status(struct dw_edma_chan *chan)
@ -228,6 +222,20 @@ static void dw_hdma_v0_core_write_chunk(struct dw_edma_chunk *chunk)
dw_hdma_v0_write_ll_link(chunk, i, control, chunk->ll_region.paddr);
}
static void dw_hdma_v0_sync_ll_data(struct dw_edma_chunk *chunk)
{
/*
* In case of remote HDMA engine setup, the DW PCIe RP/EP internal
* configuration registers and application memory are normally accessed
* over different buses. Ensure LL-data reaches the memory before the
* doorbell register is toggled by issuing the dummy-read from the remote
* LL memory in the hope that the MRd TLP will return only after the
* last MWr TLP is completed
*/
if (!(chunk->chan->dw->chip->flags & DW_EDMA_CHIP_LOCAL))
readl(chunk->ll_region.vaddr.io);
}
static void dw_hdma_v0_core_start(struct dw_edma_chunk *chunk, bool first)
{
struct dw_edma_chan *chan = chunk->chan;
@ -242,7 +250,9 @@ static void dw_hdma_v0_core_start(struct dw_edma_chunk *chunk, bool first)
/* Interrupt enable&unmask - done, abort */
tmp = GET_CH_32(dw, chan->dir, chan->id, int_setup) |
HDMA_V0_STOP_INT_MASK | HDMA_V0_ABORT_INT_MASK |
HDMA_V0_LOCAL_STOP_INT_EN | HDMA_V0_LOCAL_STOP_INT_EN;
HDMA_V0_LOCAL_STOP_INT_EN | HDMA_V0_LOCAL_ABORT_INT_EN;
if (!(dw->chip->flags & DW_EDMA_CHIP_LOCAL))
tmp |= HDMA_V0_REMOTE_STOP_INT_EN | HDMA_V0_REMOTE_ABORT_INT_EN;
SET_CH_32(dw, chan->dir, chan->id, int_setup, tmp);
/* Channel control */
SET_CH_32(dw, chan->dir, chan->id, control1, HDMA_V0_LINKLIST_EN);
@ -256,6 +266,9 @@ static void dw_hdma_v0_core_start(struct dw_edma_chunk *chunk, bool first)
/* Set consumer cycle */
SET_CH_32(dw, chan->dir, chan->id, cycle_sync,
HDMA_V0_CONSUMER_CYCLE_STAT | HDMA_V0_CONSUMER_CYCLE_BIT);
dw_hdma_v0_sync_ll_data(chunk);
/* Doorbell */
SET_CH_32(dw, chan->dir, chan->id, doorbell, HDMA_V0_DOORBELL_START);
}


@ -15,7 +15,7 @@
#define HDMA_V0_LOCAL_ABORT_INT_EN BIT(6)
#define HDMA_V0_REMOTE_ABORT_INT_EN BIT(5)
#define HDMA_V0_LOCAL_STOP_INT_EN BIT(4)
#define HDMA_V0_REMOTEL_STOP_INT_EN BIT(3)
#define HDMA_V0_REMOTE_STOP_INT_EN BIT(3)
#define HDMA_V0_ABORT_INT_MASK BIT(2)
#define HDMA_V0_STOP_INT_MASK BIT(0)
#define HDMA_V0_LINKLIST_EN BIT(0)


@ -345,7 +345,7 @@ static void idxd_cdev_evl_drain_pasid(struct idxd_wq *wq, u32 pasid)
spin_lock(&evl->lock);
status.bits = ioread64(idxd->reg_base + IDXD_EVLSTATUS_OFFSET);
t = status.tail;
h = evl->head;
h = status.head;
size = evl->size;
while (h != t) {


@ -68,9 +68,9 @@ static int debugfs_evl_show(struct seq_file *s, void *d)
spin_lock(&evl->lock);
h = evl->head;
evl_status.bits = ioread64(idxd->reg_base + IDXD_EVLSTATUS_OFFSET);
t = evl_status.tail;
h = evl_status.head;
evl_size = evl->size;
seq_printf(s, "Event Log head %u tail %u interrupt pending %u\n\n",


@ -286,7 +286,6 @@ struct idxd_evl {
unsigned int log_size;
/* The number of entries in the event log. */
u16 size;
u16 head;
unsigned long *bmap;
bool batch_fail[IDXD_MAX_BATCH_IDENT];
};


@ -342,7 +342,9 @@ static void idxd_cleanup_internals(struct idxd_device *idxd)
static int idxd_init_evl(struct idxd_device *idxd)
{
struct device *dev = &idxd->pdev->dev;
unsigned int evl_cache_size;
struct idxd_evl *evl;
const char *idxd_name;
if (idxd->hw.gen_cap.evl_support == 0)
return 0;
@ -354,9 +356,16 @@ static int idxd_init_evl(struct idxd_device *idxd)
spin_lock_init(&evl->lock);
evl->size = IDXD_EVL_SIZE_MIN;
idxd->evl_cache = kmem_cache_create(dev_name(idxd_confdev(idxd)),
sizeof(struct idxd_evl_fault) + evl_ent_size(idxd),
0, 0, NULL);
idxd_name = dev_name(idxd_confdev(idxd));
evl_cache_size = sizeof(struct idxd_evl_fault) + evl_ent_size(idxd);
/*
* Since completion record in evl_cache will be copied to user
* when handling completion record page fault, need to create
* the cache suitable for user copy.
*/
idxd->evl_cache = kmem_cache_create_usercopy(idxd_name, evl_cache_size,
0, 0, 0, evl_cache_size,
NULL);
if (!idxd->evl_cache) {
kfree(evl);
return -ENOMEM;


@ -367,9 +367,9 @@ static void process_evl_entries(struct idxd_device *idxd)
/* Clear interrupt pending bit */
iowrite32(evl_status.bits_upper32,
idxd->reg_base + IDXD_EVLSTATUS_OFFSET + sizeof(u32));
h = evl->head;
evl_status.bits = ioread64(idxd->reg_base + IDXD_EVLSTATUS_OFFSET);
t = evl_status.tail;
h = evl_status.head;
size = idxd->evl->size;
while (h != t) {
@ -378,7 +378,6 @@ static void process_evl_entries(struct idxd_device *idxd)
h = (h + 1) % size;
}
evl->head = h;
evl_status.head = h;
iowrite32(evl_status.bits_lower32, idxd->reg_base + IDXD_EVLSTATUS_OFFSET);
spin_unlock(&evl->lock);


@ -385,8 +385,6 @@ int pt_dmaengine_register(struct pt_device *pt)
chan->vc.desc_free = pt_do_cleanup;
vchan_init(&chan->vc, dma_dev);
dma_set_mask_and_coherent(pt->dev, DMA_BIT_MASK(64));
ret = dma_async_device_register(dma_dev);
if (ret)
goto err_reg;


@ -292,7 +292,7 @@ static int efi_capsule_open(struct inode *inode, struct file *file)
return -ENOMEM;
}
cap_info->phys = kzalloc(sizeof(void *), GFP_KERNEL);
cap_info->phys = kzalloc(sizeof(phys_addr_t), GFP_KERNEL);
if (!cap_info->phys) {
kfree(cap_info->pages);
kfree(cap_info);


@ -127,8 +127,6 @@ static int gen_74x164_probe(struct spi_device *spi)
if (IS_ERR(chip->gpiod_oe))
return PTR_ERR(chip->gpiod_oe);
gpiod_set_value_cansleep(chip->gpiod_oe, 1);
spi_set_drvdata(spi, chip);
chip->gpio_chip.label = spi->modalias;
@ -156,6 +154,8 @@ static int gen_74x164_probe(struct spi_device *spi)
goto exit_destroy;
}
gpiod_set_value_cansleep(chip->gpiod_oe, 1);
ret = gpiochip_add_data(&chip->gpio_chip, chip);
if (!ret)
return 0;


@ -894,11 +894,11 @@ int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data,
ret = gpiochip_irqchip_init_valid_mask(gc);
if (ret)
goto err_remove_acpi_chip;
goto err_free_hogs;
ret = gpiochip_irqchip_init_hw(gc);
if (ret)
goto err_remove_acpi_chip;
goto err_remove_irqchip_mask;
ret = gpiochip_add_irqchip(gc, lock_key, request_key);
if (ret)
@ -923,13 +923,13 @@ err_remove_irqchip:
gpiochip_irqchip_remove(gc);
err_remove_irqchip_mask:
gpiochip_irqchip_free_valid_mask(gc);
err_remove_acpi_chip:
acpi_gpiochip_remove(gc);
err_remove_of_chip:
err_free_hogs:
gpiochip_free_hogs(gc);
acpi_gpiochip_remove(gc);
gpiochip_remove_pin_ranges(gc);
err_remove_of_chip:
of_gpiochip_remove(gc);
err_free_gpiochip_mask:
gpiochip_remove_pin_ranges(gc);
gpiochip_free_valid_mask(gc);
if (gdev->dev.release) {
/* release() has been registered by gpiochip_setup_dev() */


@ -66,6 +66,8 @@ static void apply_edid_quirks(struct edid *edid, struct dc_edid_caps *edid_caps)
/* Workaround for some monitors that do not clear DPCD 0x317 if FreeSync is unsupported */
case drm_edid_encode_panel_id('A', 'U', 'O', 0xA7AB):
case drm_edid_encode_panel_id('A', 'U', 'O', 0xE69B):
case drm_edid_encode_panel_id('B', 'O', 'E', 0x092A):
case drm_edid_encode_panel_id('L', 'G', 'D', 0x06D1):
DRM_DEBUG_DRIVER("Clearing DPCD 0x317 on monitor with panel id %X\n", panel_id);
edid_caps->panel_patch.remove_sink_ext_caps = true;
break;
@ -119,6 +121,8 @@ enum dc_edid_status dm_helpers_parse_edid_caps(
edid_caps->edid_hdmi = connector->display_info.is_hdmi;
apply_edid_quirks(edid_buf, edid_caps);
sad_count = drm_edid_to_sad((struct edid *) edid->raw_edid, &sads);
if (sad_count <= 0)
return result;
@ -145,8 +149,6 @@ enum dc_edid_status dm_helpers_parse_edid_caps(
else
edid_caps->speaker_flags = DEFAULT_SPEAKER_LOCATION;
apply_edid_quirks(edid_buf, edid_caps);
kfree(sads);
kfree(sadb);


@ -6925,6 +6925,23 @@ static int si_dpm_enable(struct amdgpu_device *adev)
return 0;
}
static int si_set_temperature_range(struct amdgpu_device *adev)
{
int ret;
ret = si_thermal_enable_alert(adev, false);
if (ret)
return ret;
ret = si_thermal_set_temperature_range(adev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
if (ret)
return ret;
ret = si_thermal_enable_alert(adev, true);
if (ret)
return ret;
return ret;
}
static void si_dpm_disable(struct amdgpu_device *adev)
{
struct rv7xx_power_info *pi = rv770_get_pi(adev);
@ -7608,6 +7625,18 @@ static int si_dpm_process_interrupt(struct amdgpu_device *adev,
static int si_dpm_late_init(void *handle)
{
int ret;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (!adev->pm.dpm_enabled)
return 0;
ret = si_set_temperature_range(adev);
if (ret)
return ret;
#if 0 //TODO ?
si_dpm_powergate_uvd(adev, true);
#endif
return 0;
}


@ -332,6 +332,7 @@ alloc_range_bias(struct drm_buddy *mm,
u64 start, u64 end,
unsigned int order)
{
u64 req_size = mm->chunk_size << order;
struct drm_buddy_block *block;
struct drm_buddy_block *buddy;
LIST_HEAD(dfs);
@ -367,6 +368,15 @@ alloc_range_bias(struct drm_buddy *mm,
if (drm_buddy_block_is_allocated(block))
continue;
if (block_start < start || block_end > end) {
u64 adjusted_start = max(block_start, start);
u64 adjusted_end = min(block_end, end);
if (round_down(adjusted_end + 1, req_size) <=
round_up(adjusted_start, req_size))
continue;
}
if (contains(start, end, block_start, block_end) &&
order == drm_buddy_block_order(block)) {
/*
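The added check above skips a block early when its overlap with the biased [start, end] range cannot contain even one req_size-aligned chunk: round_up() finds the first aligned chunk start at or after the clipped start, round_down() the end of the last aligned chunk at or before the clipped end, and if they cross there is nothing to allocate there. A worked example with made-up numbers; the rounding helpers are spelled out for power-of-two sizes.

#include <stdio.h>
#include <stdint.h>

/* Power-of-two rounding, equivalent to the kernel's round_up()/round_down(). */
#define round_up(x, y)		((((x) - 1) | ((y) - 1)) + 1)
#define round_down(x, y)	((x) & ~((uint64_t)(y) - 1))

int main(void)
{
	/* Hypothetical block 0x0000-0x3fff clipped to a biased range
	 * 0x1000-0x2fff, with req_size = mm->chunk_size << order = 0x2000. */
	uint64_t adjusted_start = 0x1000, adjusted_end = 0x2fff;
	uint64_t req_size = 0x2000;

	uint64_t lo = round_up(adjusted_start, req_size);	/* 0x2000 */
	uint64_t hi = round_down(adjusted_end + 1, req_size);	/* 0x2000 */

	/* 8 KiB of overlap, but no 8 KiB-aligned chunk fits, so skip it. */
	printf("lo=%#llx hi=%#llx -> %s\n",
	       (unsigned long long)lo, (unsigned long long)hi,
	       hi <= lo ? "skip this block" : "worth descending into");
	return 0;
}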


@ -708,10 +708,11 @@ nouveau_drm_device_fini(struct drm_device *dev)
}
mutex_unlock(&drm->clients_lock);
nouveau_sched_fini(drm);
nouveau_cli_fini(&drm->client);
nouveau_cli_fini(&drm->master);
nouveau_sched_fini(drm);
nvif_parent_dtor(&drm->parent);
mutex_destroy(&drm->clients_lock);
kfree(drm);


@ -1242,9 +1242,26 @@ static int host1x_drm_probe(struct host1x_device *dev)
drm_mode_config_reset(drm);
err = drm_aperture_remove_framebuffers(&tegra_drm_driver);
if (err < 0)
goto hub;
/*
* Only take over from a potential firmware framebuffer if any CRTCs
* have been registered. This must not be a fatal error because there
* are other accelerators that are exposed via this driver.
*
* Another case where this happens is on Tegra234 where the display
* hardware is no longer part of the host1x complex, so this driver
* will not expose any modesetting features.
*/
if (drm->mode_config.num_crtc > 0) {
err = drm_aperture_remove_framebuffers(&tegra_drm_driver);
if (err < 0)
goto hub;
} else {
/*
* Indicate to userspace that this doesn't expose any display
* capabilities.
*/
drm->driver_features &= ~(DRIVER_MODESET | DRIVER_ATOMIC);
}
err = drm_dev_register(drm, 0);
if (err < 0)


@ -1158,20 +1158,23 @@ out_unlock:
int iopt_add_access(struct io_pagetable *iopt, struct iommufd_access *access)
{
u32 new_id;
int rc;
down_write(&iopt->domains_rwsem);
down_write(&iopt->iova_rwsem);
rc = xa_alloc(&iopt->access_list, &access->iopt_access_list_id, access,
xa_limit_16b, GFP_KERNEL_ACCOUNT);
rc = xa_alloc(&iopt->access_list, &new_id, access, xa_limit_16b,
GFP_KERNEL_ACCOUNT);
if (rc)
goto out_unlock;
rc = iopt_calculate_iova_alignment(iopt);
if (rc) {
xa_erase(&iopt->access_list, access->iopt_access_list_id);
xa_erase(&iopt->access_list, new_id);
goto out_unlock;
}
access->iopt_access_list_id = new_id;
out_unlock:
up_write(&iopt->iova_rwsem);


@ -1007,10 +1007,12 @@ static int mmc_select_bus_width(struct mmc_card *card)
static unsigned ext_csd_bits[] = {
EXT_CSD_BUS_WIDTH_8,
EXT_CSD_BUS_WIDTH_4,
EXT_CSD_BUS_WIDTH_1,
};
static unsigned bus_widths[] = {
MMC_BUS_WIDTH_8,
MMC_BUS_WIDTH_4,
MMC_BUS_WIDTH_1,
};
struct mmc_host *host = card->host;
unsigned idx, bus_width = 0;


@ -225,6 +225,8 @@ static int sdmmc_idma_start(struct mmci_host *host, unsigned int *datactrl)
struct scatterlist *sg;
int i;
host->dma_in_progress = true;
if (!host->variant->dma_lli || data->sg_len == 1 ||
idma->use_bounce_buffer) {
u32 dma_addr;
@ -263,9 +265,30 @@ static int sdmmc_idma_start(struct mmci_host *host, unsigned int *datactrl)
return 0;
}
static void sdmmc_idma_error(struct mmci_host *host)
{
struct mmc_data *data = host->data;
struct sdmmc_idma *idma = host->dma_priv;
if (!dma_inprogress(host))
return;
writel_relaxed(0, host->base + MMCI_STM32_IDMACTRLR);
host->dma_in_progress = false;
data->host_cookie = 0;
if (!idma->use_bounce_buffer)
dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
mmc_get_dma_dir(data));
}
static void sdmmc_idma_finalize(struct mmci_host *host, struct mmc_data *data)
{
if (!dma_inprogress(host))
return;
writel_relaxed(0, host->base + MMCI_STM32_IDMACTRLR);
host->dma_in_progress = false;
if (!data->host_cookie)
sdmmc_idma_unprep_data(host, data, 0);
@ -676,6 +699,7 @@ static struct mmci_host_ops sdmmc_variant_ops = {
.dma_setup = sdmmc_idma_setup,
.dma_start = sdmmc_idma_start,
.dma_finalize = sdmmc_idma_finalize,
.dma_error = sdmmc_idma_error,
.set_clkreg = mmci_sdmmc_set_clkreg,
.set_pwrreg = mmci_sdmmc_set_pwrreg,
.busy_complete = sdmmc_busy_complete,


@ -11,6 +11,7 @@
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/ktime.h>
#include <linux/iopoll.h>
#include <linux/of_address.h>
#include "sdhci-pltfm.h"
@ -109,6 +110,8 @@
#define XENON_EMMC_PHY_LOGIC_TIMING_ADJUST (XENON_EMMC_PHY_REG_BASE + 0x18)
#define XENON_LOGIC_TIMING_VALUE 0x00AA8977
#define XENON_MAX_PHY_TIMEOUT_LOOPS 100
/*
* List offset of PHY registers and some special register values
* in eMMC PHY 5.0 or eMMC PHY 5.1
@ -216,6 +219,19 @@ static int xenon_alloc_emmc_phy(struct sdhci_host *host)
return 0;
}
static int xenon_check_stability_internal_clk(struct sdhci_host *host)
{
u32 reg;
int err;
err = read_poll_timeout(sdhci_readw, reg, reg & SDHCI_CLOCK_INT_STABLE,
1100, 20000, false, host, SDHCI_CLOCK_CONTROL);
if (err)
dev_err(mmc_dev(host->mmc), "phy_init: Internal clock never stabilized.\n");
return err;
}
/*
* eMMC 5.0/5.1 PHY init/re-init.
* eMMC PHY init should be executed after:
@ -232,6 +248,11 @@ static int xenon_emmc_phy_init(struct sdhci_host *host)
struct xenon_priv *priv = sdhci_pltfm_priv(pltfm_host);
struct xenon_emmc_phy_regs *phy_regs = priv->emmc_phy_regs;
int ret = xenon_check_stability_internal_clk(host);
if (ret)
return ret;
reg = sdhci_readl(host, phy_regs->timing_adj);
reg |= XENON_PHY_INITIALIZAION;
sdhci_writel(host, reg, phy_regs->timing_adj);
@ -259,18 +280,27 @@ static int xenon_emmc_phy_init(struct sdhci_host *host)
/* get the wait time */
wait /= clock;
wait++;
/* wait for host eMMC PHY init completes */
udelay(wait);
reg = sdhci_readl(host, phy_regs->timing_adj);
reg &= XENON_PHY_INITIALIZAION;
if (reg) {
/*
* AC5X spec says bit must be polled until zero.
* We see cases in which timeout can take longer
* than the standard calculation on AC5X, which is
* expected following the spec comment above.
* According to the spec, we must wait as long as
* it takes for that bit to toggle on AC5X.
* Cap that with 100 delay loops so we won't get
* stuck here forever:
*/
ret = read_poll_timeout(sdhci_readl, reg,
!(reg & XENON_PHY_INITIALIZAION),
wait, XENON_MAX_PHY_TIMEOUT_LOOPS * wait,
false, host, phy_regs->timing_adj);
if (ret)
dev_err(mmc_dev(host->mmc), "eMMC PHY init cannot complete after %d us\n",
wait);
return -ETIMEDOUT;
}
wait * XENON_MAX_PHY_TIMEOUT_LOOPS);
return 0;
return ret;
}
#define ARMADA_3700_SOC_PAD_1_8V 0x1


@ -290,16 +290,13 @@ static const struct marvell_hw_ecc_layout marvell_nfc_layouts[] = {
MARVELL_LAYOUT( 2048, 512, 4, 1, 1, 2048, 32, 30, 0, 0, 0),
MARVELL_LAYOUT( 2048, 512, 8, 2, 1, 1024, 0, 30,1024,32, 30),
MARVELL_LAYOUT( 2048, 512, 8, 2, 1, 1024, 0, 30,1024,64, 30),
MARVELL_LAYOUT( 2048, 512, 12, 3, 2, 704, 0, 30,640, 0, 30),
MARVELL_LAYOUT( 2048, 512, 16, 5, 4, 512, 0, 30, 0, 32, 30),
MARVELL_LAYOUT( 2048, 512, 16, 4, 4, 512, 0, 30, 0, 32, 30),
MARVELL_LAYOUT( 4096, 512, 4, 2, 2, 2048, 32, 30, 0, 0, 0),
MARVELL_LAYOUT( 4096, 512, 8, 5, 4, 1024, 0, 30, 0, 64, 30),
MARVELL_LAYOUT( 4096, 512, 12, 6, 5, 704, 0, 30,576, 32, 30),
MARVELL_LAYOUT( 4096, 512, 16, 9, 8, 512, 0, 30, 0, 32, 30),
MARVELL_LAYOUT( 4096, 512, 8, 4, 4, 1024, 0, 30, 0, 64, 30),
MARVELL_LAYOUT( 4096, 512, 16, 8, 8, 512, 0, 30, 0, 32, 30),
MARVELL_LAYOUT( 8192, 512, 4, 4, 4, 2048, 0, 30, 0, 0, 0),
MARVELL_LAYOUT( 8192, 512, 8, 9, 8, 1024, 0, 30, 0, 160, 30),
MARVELL_LAYOUT( 8192, 512, 12, 12, 11, 704, 0, 30,448, 64, 30),
MARVELL_LAYOUT( 8192, 512, 16, 17, 16, 512, 0, 30, 0, 32, 30),
MARVELL_LAYOUT( 8192, 512, 8, 8, 8, 1024, 0, 30, 0, 160, 30),
MARVELL_LAYOUT( 8192, 512, 16, 16, 16, 512, 0, 30, 0, 32, 30),
};
/**


@ -1077,6 +1077,14 @@ int memac_initialization(struct mac_device *mac_dev,
struct fwnode_handle *mac_fwnode = dev->fwnode;
struct fwnode_handle *fixed;
/* The internal connection to the serdes is XGMII, but this isn't
* really correct for the phy mode (which is the external connection).
* However, this is how all older device trees say that they want
* 10GBASE-R (aka XFI), so just convert it for them.
*/
if (mac_dev->phy_if == PHY_INTERFACE_MODE_XGMII)
mac_dev->phy_if = PHY_INTERFACE_MODE_10GBASER;
mac_dev->phylink_ops = &memac_mac_ops;
mac_dev->set_promisc = memac_set_promiscuous;
mac_dev->change_addr = memac_modify_mac_address;


@ -957,7 +957,7 @@ static void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter)
igb_ptp_systim_to_hwtstamp(adapter, &shhwtstamps, regval);
/* adjust timestamp for the TX latency based on link speed */
if (adapter->hw.mac.type == e1000_i210) {
if (hw->mac.type == e1000_i210 || hw->mac.type == e1000_i211) {
switch (adapter->link_speed) {
case SPEED_10:
adjust = IGB_I210_TX_LATENCY_10;
@ -1003,6 +1003,7 @@ int igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, void *va,
ktime_t *timestamp)
{
struct igb_adapter *adapter = q_vector->adapter;
struct e1000_hw *hw = &adapter->hw;
struct skb_shared_hwtstamps ts;
__le64 *regval = (__le64 *)va;
int adjust = 0;
@ -1022,7 +1023,7 @@ int igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, void *va,
igb_ptp_systim_to_hwtstamp(adapter, &ts, le64_to_cpu(regval[1]));
/* adjust timestamp for the RX latency based on link speed */
if (adapter->hw.mac.type == e1000_i210) {
if (hw->mac.type == e1000_i210 || hw->mac.type == e1000_i211) {
switch (adapter->link_speed) {
case SPEED_10:
adjust = IGB_I210_RX_LATENCY_10;

View File

@ -3927,8 +3927,10 @@ static void stmmac_fpe_stop_wq(struct stmmac_priv *priv)
{
set_bit(__FPE_REMOVING, &priv->fpe_task_state);
if (priv->fpe_wq)
if (priv->fpe_wq) {
destroy_workqueue(priv->fpe_wq);
priv->fpe_wq = NULL;
}
netdev_info(priv->dev, "FPE workqueue stop");
}

View File

@ -1903,26 +1903,26 @@ static int __init gtp_init(void)
get_random_bytes(&gtp_h_initval, sizeof(gtp_h_initval));
err = rtnl_link_register(&gtp_link_ops);
err = register_pernet_subsys(&gtp_net_ops);
if (err < 0)
goto error_out;
err = register_pernet_subsys(&gtp_net_ops);
err = rtnl_link_register(&gtp_link_ops);
if (err < 0)
goto unreg_rtnl_link;
goto unreg_pernet_subsys;
err = genl_register_family(&gtp_genl_family);
if (err < 0)
goto unreg_pernet_subsys;
goto unreg_rtnl_link;
pr_info("GTP module loaded (pdp ctx size %zd bytes)\n",
sizeof(struct pdp_ctx));
return 0;
unreg_pernet_subsys:
unregister_pernet_subsys(&gtp_net_ops);
unreg_rtnl_link:
rtnl_link_unregister(&gtp_link_ops);
unreg_pernet_subsys:
unregister_pernet_subsys(&gtp_net_ops);
error_out:
pr_err("error loading GTP module loaded\n");
return err;
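The reordering above restores the usual register-in-order, unwind-in-reverse-order error handling. Below is a small, self-contained sketch of that pattern; register_a()/register_b()/register_c() and the unregister_*() stubs are hypothetical stand-ins for register_pernet_subsys(), rtnl_link_register() and genl_register_family().

#include <stdio.h>

/* Hypothetical stubs; the third registration fails to show the unwind path. */
static int register_a(void)    { return 0; }    /* e.g. register_pernet_subsys() */
static int register_b(void)    { return 0; }    /* e.g. rtnl_link_register() */
static int register_c(void)    { return -1; }   /* e.g. genl_register_family() */
static void unregister_a(void) { puts("undo a"); }
static void unregister_b(void) { puts("undo b"); }

static int example_init(void)
{
        int err;

        err = register_a();
        if (err)
                goto out;

        err = register_b();
        if (err)
                goto unreg_a;

        err = register_c();
        if (err)
                goto unreg_b;

        return 0;

unreg_b:
        unregister_b();         /* undo in reverse order of registration */
unreg_a:
        unregister_a();
out:
        return err;
}

int main(void)
{
        printf("init: %d\n", example_init());
        return 0;
}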

View File

@ -653,6 +653,7 @@ static void __tun_detach(struct tun_file *tfile, bool clean)
tun->tfiles[tun->numqueues - 1]);
ntfile = rtnl_dereference(tun->tfiles[index]);
ntfile->queue_index = index;
ntfile->xdp_rxq.queue_index = index;
rcu_assign_pointer(tun->tfiles[tun->numqueues - 1],
NULL);

View File

@ -232,7 +232,7 @@ static int dm9601_mdio_read(struct net_device *netdev, int phy_id, int loc)
err = dm_read_shared_word(dev, 1, loc, &res);
if (err < 0) {
netdev_err(dev->net, "MDIO read error: %d\n", err);
return err;
return 0;
}
netdev_dbg(dev->net,

View File

@ -1501,7 +1501,9 @@ static int lan78xx_link_reset(struct lan78xx_net *dev)
lan78xx_rx_urb_submit_all(dev);
local_bh_disable();
napi_schedule(&dev->napi);
local_bh_enable();
}
return 0;
@ -3035,7 +3037,8 @@ static int lan78xx_reset(struct lan78xx_net *dev)
if (dev->chipid == ID_REV_CHIP_ID_7801_)
buf &= ~MAC_CR_GMII_EN_;
if (dev->chipid == ID_REV_CHIP_ID_7800_) {
if (dev->chipid == ID_REV_CHIP_ID_7800_ ||
dev->chipid == ID_REV_CHIP_ID_7850_) {
ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
if (!ret && sig != EEPROM_INDICATOR) {
/* Implies there is no external eeprom. Set mac speed */

View File

@ -1200,14 +1200,6 @@ static int veth_enable_xdp(struct net_device *dev)
veth_disable_xdp_range(dev, 0, dev->real_num_rx_queues, true);
return err;
}
if (!veth_gro_requested(dev)) {
/* user-space did not require GRO, but adding XDP
* is supposed to get GRO working
*/
dev->features |= NETIF_F_GRO;
netdev_features_change(dev);
}
}
}
@ -1227,18 +1219,9 @@ static void veth_disable_xdp(struct net_device *dev)
for (i = 0; i < dev->real_num_rx_queues; i++)
rcu_assign_pointer(priv->rq[i].xdp_prog, NULL);
if (!netif_running(dev) || !veth_gro_requested(dev)) {
if (!netif_running(dev) || !veth_gro_requested(dev))
veth_napi_del(dev);
/* if user-space did not require GRO, since adding XDP
* enabled it, clear it now
*/
if (!veth_gro_requested(dev) && netif_running(dev)) {
dev->features &= ~NETIF_F_GRO;
netdev_features_change(dev);
}
}
veth_disable_xdp_range(dev, 0, dev->real_num_rx_queues, false);
}
@ -1470,7 +1453,8 @@ static int veth_alloc_queues(struct net_device *dev)
struct veth_priv *priv = netdev_priv(dev);
int i;
priv->rq = kcalloc(dev->num_rx_queues, sizeof(*priv->rq), GFP_KERNEL_ACCOUNT);
priv->rq = kvcalloc(dev->num_rx_queues, sizeof(*priv->rq),
GFP_KERNEL_ACCOUNT | __GFP_RETRY_MAYFAIL);
if (!priv->rq)
return -ENOMEM;
@ -1486,7 +1470,7 @@ static void veth_free_queues(struct net_device *dev)
{
struct veth_priv *priv = netdev_priv(dev);
kfree(priv->rq);
kvfree(priv->rq);
}
static int veth_dev_init(struct net_device *dev)
@ -1646,6 +1630,14 @@ static int veth_xdp_set(struct net_device *dev, struct bpf_prog *prog,
}
if (!old_prog) {
if (!veth_gro_requested(dev)) {
/* user-space did not require GRO, but adding
* XDP is supposed to get GRO working
*/
dev->features |= NETIF_F_GRO;
netdev_features_change(dev);
}
peer->hw_features &= ~NETIF_F_GSO_SOFTWARE;
peer->max_mtu = max_mtu;
}
@ -1661,6 +1653,14 @@ static int veth_xdp_set(struct net_device *dev, struct bpf_prog *prog,
if (dev->flags & IFF_UP)
veth_disable_xdp(dev);
/* if user-space did not require GRO, since adding XDP
* enabled it, clear it now
*/
if (!veth_gro_requested(dev)) {
dev->features &= ~NETIF_F_GRO;
netdev_features_change(dev);
}
if (peer) {
peer->hw_features |= NETIF_F_GSO_SOFTWARE;
peer->max_mtu = ETH_MAX_MTU;

View File

@ -1302,7 +1302,7 @@ static struct device_node *parse_remote_endpoint(struct device_node *np,
int index)
{
/* Return NULL for index > 0 to signify end of remote-endpoints. */
if (!index || strcmp(prop_name, "remote-endpoint"))
if (index > 0 || strcmp(prop_name, "remote-endpoint"))
return NULL;
return of_graph_get_remote_port_parent(np);

View File

@ -150,19 +150,11 @@ u64 riscv_pmu_ctr_get_width_mask(struct perf_event *event)
struct riscv_pmu *rvpmu = to_riscv_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
if (!rvpmu->ctr_get_width)
/**
* If the pmu driver doesn't support counter width, set it to default
* maximum allowed by the specification.
*/
cwidth = 63;
else {
if (hwc->idx == -1)
/* Handle init case where idx is not initialized yet */
cwidth = rvpmu->ctr_get_width(0);
else
cwidth = rvpmu->ctr_get_width(hwc->idx);
}
if (hwc->idx == -1)
/* Handle init case where idx is not initialized yet */
cwidth = rvpmu->ctr_get_width(0);
else
cwidth = rvpmu->ctr_get_width(hwc->idx);
return GENMASK_ULL(cwidth, 0);
}

View File

@ -37,6 +37,12 @@ static int pmu_legacy_event_map(struct perf_event *event, u64 *config)
return pmu_legacy_ctr_get_idx(event);
}
/* cycle & instret are always 64 bit, one bit less according to SBI spec */
static int pmu_legacy_ctr_get_width(int idx)
{
return 63;
}
static u64 pmu_legacy_read_ctr(struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
@ -111,12 +117,14 @@ static void pmu_legacy_init(struct riscv_pmu *pmu)
pmu->ctr_stop = NULL;
pmu->event_map = pmu_legacy_event_map;
pmu->ctr_get_idx = pmu_legacy_ctr_get_idx;
pmu->ctr_get_width = NULL;
pmu->ctr_get_width = pmu_legacy_ctr_get_width;
pmu->ctr_clear_idx = NULL;
pmu->ctr_read = pmu_legacy_read_ctr;
pmu->event_mapped = pmu_legacy_event_mapped;
pmu->event_unmapped = pmu_legacy_event_unmapped;
pmu->csr_index = pmu_legacy_csr_index;
pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
pmu->pmu.capabilities |= PERF_PMU_CAP_NO_EXCLUDE;
perf_pmu_register(&pmu->pmu, "cpu", PERF_TYPE_RAW);
}

View File

@ -616,6 +616,7 @@ static int rpmhpd_aggregate_corner(struct rpmhpd *pd, unsigned int corner)
unsigned int active_corner, sleep_corner;
unsigned int this_active_corner = 0, this_sleep_corner = 0;
unsigned int peer_active_corner = 0, peer_sleep_corner = 0;
unsigned int peer_enabled_corner;
if (pd->state_synced) {
to_active_sleep(pd, corner, &this_active_corner, &this_sleep_corner);
@ -625,9 +626,11 @@ static int rpmhpd_aggregate_corner(struct rpmhpd *pd, unsigned int corner)
this_sleep_corner = pd->level_count - 1;
}
if (peer && peer->enabled)
to_active_sleep(peer, peer->corner, &peer_active_corner,
if (peer && peer->enabled) {
peer_enabled_corner = max(peer->corner, peer->enable_corner);
to_active_sleep(peer, peer_enabled_corner, &peer_active_corner,
&peer_sleep_corner);
}
active_corner = max(this_active_corner, peer_active_corner);

View File

@ -209,7 +209,9 @@ static void bq27xxx_battery_i2c_remove(struct i2c_client *client)
{
struct bq27xxx_device_info *di = i2c_get_clientdata(client);
free_irq(client->irq, di);
if (client->irq)
free_irq(client->irq, di);
bq27xxx_battery_teardown(di);
mutex_lock(&battery_mutex);

View File

@ -268,10 +268,17 @@ static int pmic_glink_probe(struct platform_device *pdev)
else
pg->client_mask = PMIC_GLINK_CLIENT_DEFAULT;
pg->pdr = pdr_handle_alloc(pmic_glink_pdr_callback, pg);
if (IS_ERR(pg->pdr)) {
ret = dev_err_probe(&pdev->dev, PTR_ERR(pg->pdr),
"failed to initialize pdr\n");
return ret;
}
if (pg->client_mask & BIT(PMIC_GLINK_CLIENT_UCSI)) {
ret = pmic_glink_add_aux_device(pg, &pg->ucsi_aux, "ucsi");
if (ret)
return ret;
goto out_release_pdr_handle;
}
if (pg->client_mask & BIT(PMIC_GLINK_CLIENT_ALTMODE)) {
ret = pmic_glink_add_aux_device(pg, &pg->altmode_aux, "altmode");
@ -284,17 +291,11 @@ static int pmic_glink_probe(struct platform_device *pdev)
goto out_release_altmode_aux;
}
pg->pdr = pdr_handle_alloc(pmic_glink_pdr_callback, pg);
if (IS_ERR(pg->pdr)) {
ret = dev_err_probe(&pdev->dev, PTR_ERR(pg->pdr), "failed to initialize pdr\n");
goto out_release_aux_devices;
}
service = pdr_add_lookup(pg->pdr, "tms/servreg", "msm/adsp/charger_pd");
if (IS_ERR(service)) {
ret = dev_err_probe(&pdev->dev, PTR_ERR(service),
"failed adding pdr lookup for charger_pd\n");
goto out_release_pdr_handle;
goto out_release_aux_devices;
}
mutex_lock(&__pmic_glink_lock);
@ -303,8 +304,6 @@ static int pmic_glink_probe(struct platform_device *pdev)
return 0;
out_release_pdr_handle:
pdr_handle_release(pg->pdr);
out_release_aux_devices:
if (pg->client_mask & BIT(PMIC_GLINK_CLIENT_BATT))
pmic_glink_del_aux_device(pg, &pg->ps_aux);
@ -314,6 +313,8 @@ out_release_altmode_aux:
out_release_ucsi_aux:
if (pg->client_mask & BIT(PMIC_GLINK_CLIENT_UCSI))
pmic_glink_del_aux_device(pg, &pg->ucsi_aux);
out_release_pdr_handle:
pdr_handle_release(pg->pdr);
return ret;
}

View File

@ -1904,10 +1904,9 @@ static void cqspi_remove(struct platform_device *pdev)
static int cqspi_suspend(struct device *dev)
{
struct cqspi_st *cqspi = dev_get_drvdata(dev);
struct spi_controller *host = dev_get_drvdata(dev);
int ret;
ret = spi_controller_suspend(host);
ret = spi_controller_suspend(cqspi->host);
cqspi_controller_enable(cqspi, 0);
clk_disable_unprepare(cqspi->clk);
@ -1918,7 +1917,6 @@ static int cqspi_suspend(struct device *dev)
static int cqspi_resume(struct device *dev)
{
struct cqspi_st *cqspi = dev_get_drvdata(dev);
struct spi_controller *host = dev_get_drvdata(dev);
clk_prepare_enable(cqspi->clk);
cqspi_wait_idle(cqspi);
@ -1927,7 +1925,7 @@ static int cqspi_resume(struct device *dev)
cqspi->current_cs = -1;
cqspi->sclk = 0;
return spi_controller_resume(host);
return spi_controller_resume(cqspi->host);
}
static DEFINE_SIMPLE_DEV_PM_OPS(cqspi_dev_pm_ops, cqspi_suspend, cqspi_resume);

View File

@ -2398,11 +2398,9 @@ static int fbcon_do_set_font(struct vc_data *vc, int w, int h, int charcount,
struct fbcon_ops *ops = info->fbcon_par;
struct fbcon_display *p = &fb_display[vc->vc_num];
int resize, ret, old_userfont, old_width, old_height, old_charcount;
char *old_data = NULL;
u8 *old_data = vc->vc_font.data;
resize = (w != vc->vc_font.width) || (h != vc->vc_font.height);
if (p->userfont)
old_data = vc->vc_font.data;
vc->vc_font.data = (void *)(p->fontdata = data);
old_userfont = p->userfont;
if ((p->userfont = userfont))
@ -2436,13 +2434,13 @@ static int fbcon_do_set_font(struct vc_data *vc, int w, int h, int charcount,
update_screen(vc);
}
if (old_data && (--REFCOUNT(old_data) == 0))
if (old_userfont && (--REFCOUNT(old_data) == 0))
kfree(old_data - FONT_EXTRA_WORDS * sizeof(int));
return 0;
err_out:
p->fontdata = old_data;
vc->vc_font.data = (void *)old_data;
vc->vc_font.data = old_data;
if (userfont) {
p->userfont = old_userfont;

View File

@ -479,8 +479,10 @@ static int afs_dir_iterate_block(struct afs_vnode *dvnode,
dire->u.name[0] == '.' &&
ctx->actor != afs_lookup_filldir &&
ctx->actor != afs_lookup_one_filldir &&
memcmp(dire->u.name, ".__afs", 6) == 0)
memcmp(dire->u.name, ".__afs", 6) == 0) {
ctx->pos = blkoff + next * sizeof(union afs_xdr_dirent);
continue;
}
/* found the next entry */
if (!dir_emit(ctx, dire->u.name, nlen,

View File

@ -726,6 +726,23 @@ leave:
return ret;
}
static int btrfs_check_replace_dev_names(struct btrfs_ioctl_dev_replace_args *args)
{
if (args->start.srcdevid == 0) {
if (memchr(args->start.srcdev_name, 0,
sizeof(args->start.srcdev_name)) == NULL)
return -ENAMETOOLONG;
} else {
args->start.srcdev_name[0] = 0;
}
if (memchr(args->start.tgtdev_name, 0,
sizeof(args->start.tgtdev_name)) == NULL)
return -ENAMETOOLONG;
return 0;
}
int btrfs_dev_replace_by_ioctl(struct btrfs_fs_info *fs_info,
struct btrfs_ioctl_dev_replace_args *args)
{
@ -738,10 +755,9 @@ int btrfs_dev_replace_by_ioctl(struct btrfs_fs_info *fs_info,
default:
return -EINVAL;
}
if ((args->start.srcdevid == 0 && args->start.srcdev_name[0] == '\0') ||
args->start.tgtdev_name[0] == '\0')
return -EINVAL;
ret = btrfs_check_replace_dev_names(args);
if (ret < 0)
return ret;
ret = btrfs_dev_replace_start(fs_info, args->start.tgtdev_name,
args->start.srcdevid,
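The new btrfs_check_replace_dev_names() above uses memchr() to verify that each fixed-size, user-supplied name buffer really contains a NUL terminator before it is treated as a string. A tiny hypothetical userspace sketch of that check follows; NAME_LEN and check_name() are invented names.

#include <errno.h>
#include <stdio.h>
#include <string.h>

#define NAME_LEN 8      /* stand-in for the fixed-size ioctl name fields */

static int check_name(const char name[NAME_LEN])
{
        /* memchr() scans at most NAME_LEN bytes; if no NUL is found, the
         * buffer is not a valid (terminated) string. */
        if (memchr(name, 0, NAME_LEN) == NULL)
                return -ENAMETOOLONG;
        return 0;
}

int main(void)
{
        char good[NAME_LEN] = "dev1";   /* remaining bytes are zeroed */
        char bad[NAME_LEN] = { 'x', 'x', 'x', 'x', 'x', 'x', 'x', 'x' };

        printf("good: %d, bad: %d\n", check_name(good), check_name(bad));
        return 0;
}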

View File

@ -1282,12 +1282,12 @@ void btrfs_free_fs_info(struct btrfs_fs_info *fs_info)
*
* @objectid: root id
* @anon_dev: preallocated anonymous block device number for new roots,
* pass 0 for new allocation.
* pass NULL for a new allocation.
* @check_ref: whether to check root item references, If true, return -ENOENT
* for orphan roots
*/
static struct btrfs_root *btrfs_get_root_ref(struct btrfs_fs_info *fs_info,
u64 objectid, dev_t anon_dev,
u64 objectid, dev_t *anon_dev,
bool check_ref)
{
struct btrfs_root *root;
@ -1317,9 +1317,9 @@ again:
* that common but still possible. In that case, we just need
* to free the anon_dev.
*/
if (unlikely(anon_dev)) {
free_anon_bdev(anon_dev);
anon_dev = 0;
if (unlikely(anon_dev && *anon_dev)) {
free_anon_bdev(*anon_dev);
*anon_dev = 0;
}
if (check_ref && btrfs_root_refs(&root->root_item) == 0) {
@ -1341,7 +1341,7 @@ again:
goto fail;
}
ret = btrfs_init_fs_root(root, anon_dev);
ret = btrfs_init_fs_root(root, anon_dev ? *anon_dev : 0);
if (ret)
goto fail;
@ -1377,7 +1377,7 @@ fail:
* root's anon_dev to 0 to avoid a double free, once by btrfs_put_root()
* and once again by our caller.
*/
if (anon_dev)
if (anon_dev && *anon_dev)
root->anon_dev = 0;
btrfs_put_root(root);
return ERR_PTR(ret);
@ -1393,7 +1393,7 @@ fail:
struct btrfs_root *btrfs_get_fs_root(struct btrfs_fs_info *fs_info,
u64 objectid, bool check_ref)
{
return btrfs_get_root_ref(fs_info, objectid, 0, check_ref);
return btrfs_get_root_ref(fs_info, objectid, NULL, check_ref);
}
/*
@ -1401,11 +1401,11 @@ struct btrfs_root *btrfs_get_fs_root(struct btrfs_fs_info *fs_info,
* the anonymous block device id
*
* @objectid: tree objectid
* @anon_dev: if zero, allocate a new anonymous block device or use the
* parameter value
* @anon_dev: if NULL, allocate a new anonymous block device or use the
* parameter value if not NULL
*/
struct btrfs_root *btrfs_get_new_fs_root(struct btrfs_fs_info *fs_info,
u64 objectid, dev_t anon_dev)
u64 objectid, dev_t *anon_dev)
{
return btrfs_get_root_ref(fs_info, objectid, anon_dev, true);
}

View File

@ -64,7 +64,7 @@ void btrfs_free_fs_roots(struct btrfs_fs_info *fs_info);
struct btrfs_root *btrfs_get_fs_root(struct btrfs_fs_info *fs_info,
u64 objectid, bool check_ref);
struct btrfs_root *btrfs_get_new_fs_root(struct btrfs_fs_info *fs_info,
u64 objectid, dev_t anon_dev);
u64 objectid, dev_t *anon_dev);
struct btrfs_root *btrfs_get_fs_root_commit_root(struct btrfs_fs_info *fs_info,
struct btrfs_path *path,
u64 objectid);

View File

@ -2437,6 +2437,7 @@ static int emit_fiemap_extent(struct fiemap_extent_info *fieinfo,
struct fiemap_cache *cache,
u64 offset, u64 phys, u64 len, u32 flags)
{
u64 cache_end;
int ret = 0;
/* Set at the end of extent_fiemap(). */
@ -2446,15 +2447,102 @@ static int emit_fiemap_extent(struct fiemap_extent_info *fieinfo,
goto assign;
/*
* Sanity check, extent_fiemap() should have ensured that new
* fiemap extent won't overlap with cached one.
* Not recoverable.
* When iterating the extents of the inode, at extent_fiemap(), we may
* find an extent that starts at an offset behind the end offset of the
* previous extent we processed. This happens if fiemap is called
* without FIEMAP_FLAG_SYNC and there are ordered extents completing
* while we call btrfs_next_leaf() (through fiemap_next_leaf_item()).
*
* NOTE: Physical address can overlap, due to compression
* For example we are in leaf X processing its last item, which is the
* file extent item for file range [512K, 1M[, and after
* btrfs_next_leaf() releases the path, there's an ordered extent that
* completes for the file range [768K, 2M[, and that results in trimming
* the file extent item so that it now corresponds to the file range
* [512K, 768K[ and a new file extent item is inserted for the file
* range [768K, 2M[, which may end up as the last item of leaf X or as
* the first item of the next leaf - in either case btrfs_next_leaf()
* will leave us with a path pointing to the new extent item, for the
* file range [768K, 2M[, since that's the first key that follows the
* last one we processed. So in order not to report overlapping extents
* to user space, we trim the length of the previously cached extent and
* emit it.
*
* Upon calling btrfs_next_leaf() we may also find an extent with an
* offset smaller than or equal to cache->offset, and this happens
* when we had a hole or prealloc extent with several delalloc ranges in
* it, but after btrfs_next_leaf() released the path, delalloc was
* flushed and the resulting ordered extents were completed, so we can
* now have found a file extent item for an offset that is smaller than
* or equal to what we have in cache->offset. We deal with this as
* described below.
*/
if (cache->offset + cache->len > offset) {
WARN_ON(1);
return -EINVAL;
cache_end = cache->offset + cache->len;
if (cache_end > offset) {
if (offset == cache->offset) {
/*
* We cached a delalloc range (found in the io tree) for
* a hole or prealloc extent and we have now found a
* file extent item for the same offset. What we have
* now is more recent and up to date, so discard what
* we had in the cache and use what we have just found.
*/
goto assign;
} else if (offset > cache->offset) {
/*
* The extent range we previously found ends after the
* offset of the file extent item we found and that
* offset falls somewhere in the middle of that previous
* extent range. So adjust the range we previously found
* to end at the offset of the file extent item we have
* just found, since this extent is more up to date.
* Emit that adjusted range and cache the file extent
* item we have just found. This corresponds to the case
* where a previously found file extent item was split
* due to an ordered extent completing.
*/
cache->len = offset - cache->offset;
goto emit;
} else {
const u64 range_end = offset + len;
/*
* The offset of the file extent item we have just found
* is behind the cached offset. This means we were
* processing a hole or prealloc extent for which we
* have found delalloc ranges (in the io tree), so what
* we have in the cache is the last delalloc range we
* found while the file extent item we found can be
* either for a whole delalloc range we previously
* emitted or only a part of that range.
*
* We have two cases here:
*
* 1) The file extent item's range ends at or behind the
* cached extent's end. In this case just ignore the
* current file extent item because we don't want to
* overlap with previous ranges that may have been
* emitted already;
*
* 2) The file extent item starts behind the currently
* cached extent but its end offset goes beyond the
* end offset of the cached extent. We don't want to
* overlap with a previous range that may have been
* emitted already, so we emit the currently cached
* extent and then partially store the current file
* extent item's range in the cache, for the subrange
* going from the cached extent's end to the end of the
* file extent item.
*/
if (range_end <= cache_end)
return 0;
if (!(flags & (FIEMAP_EXTENT_ENCODED | FIEMAP_EXTENT_DELALLOC)))
phys += cache_end - offset;
offset = cache_end;
len = range_end - cache_end;
goto emit;
}
}
/*
@ -2474,6 +2562,7 @@ static int emit_fiemap_extent(struct fiemap_extent_info *fieinfo,
return 0;
}
emit:
/* Not mergeable, need to submit cached one */
ret = fiemap_fill_next_extent(fieinfo, cache->offset, cache->phys,
cache->len, cache->flags);
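To make the long comment above concrete, here is a hypothetical userspace sketch that walks the same overlap cases with plain numbers; handle_overlap() is an invented helper, and the example in main() mirrors the [512K, 1M[ / [768K, 2M[ scenario described in the comment.

#include <stdio.h>

/* Cached range is [c_off, c_off + c_len), new file extent is [n_off, n_off + n_len). */
static void handle_overlap(unsigned long long c_off, unsigned long long c_len,
                           unsigned long long n_off, unsigned long long n_len)
{
        unsigned long long c_end = c_off + c_len;
        unsigned long long n_end = n_off + n_len;

        if (c_end <= n_off) {
                printf("no overlap: emit the cached range, then cache the new extent\n");
        } else if (n_off == c_off) {
                printf("same start: new extent is more up to date, replace the cache\n");
        } else if (n_off > c_off) {
                /* Ordered extent completion split the cached extent: trim it. */
                printf("trim cache to [%llu, %llu), emit it, then cache the new extent\n",
                       c_off, n_off);
        } else if (n_end <= c_end) {
                printf("new extent fully covered by the cache: ignore it\n");
        } else {
                /* Emit the cache and keep only the tail of the new extent. */
                printf("emit cache, then cache the tail [%llu, %llu)\n", c_end, n_end);
        }
}

int main(void)
{
        /* Mirrors the comment's example: cache [512K, 1M), new extent [768K, 2M). */
        handle_overlap(512ULL << 10, 512ULL << 10, 768ULL << 10, 1280ULL << 10);
        return 0;
}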

View File

@ -721,7 +721,7 @@ static noinline int create_subvol(struct mnt_idmap *idmap,
free_extent_buffer(leaf);
leaf = NULL;
new_root = btrfs_get_new_fs_root(fs_info, objectid, anon_dev);
new_root = btrfs_get_new_fs_root(fs_info, objectid, &anon_dev);
if (IS_ERR(new_root)) {
ret = PTR_ERR(new_root);
btrfs_abort_transaction(trans, ret);

View File

@ -6705,11 +6705,20 @@ static int finish_inode_if_needed(struct send_ctx *sctx, int at_end)
if (ret)
goto out;
}
if (sctx->cur_inode_last_extent <
sctx->cur_inode_size) {
ret = send_hole(sctx, sctx->cur_inode_size);
if (ret)
if (sctx->cur_inode_last_extent < sctx->cur_inode_size) {
ret = range_is_hole_in_parent(sctx,
sctx->cur_inode_last_extent,
sctx->cur_inode_size);
if (ret < 0) {
goto out;
} else if (ret == 0) {
ret = send_hole(sctx, sctx->cur_inode_size);
if (ret < 0)
goto out;
} else {
/* Range is already a hole, skip. */
ret = 0;
}
}
}
if (need_truncate) {

View File

@ -1821,7 +1821,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
}
key.offset = (u64)-1;
pending->snap = btrfs_get_new_fs_root(fs_info, objectid, pending->anon_dev);
pending->snap = btrfs_get_new_fs_root(fs_info, objectid, &pending->anon_dev);
if (IS_ERR(pending->snap)) {
ret = PTR_ERR(pending->snap);
pending->snap = NULL;

View File

@ -372,7 +372,7 @@ static void dup_variable_bug(efi_char16_t *str16, efi_guid_t *vendor_guid,
int efivar_init(int (*func)(efi_char16_t *, efi_guid_t, unsigned long, void *),
void *data, bool duplicates, struct list_head *head)
{
unsigned long variable_name_size = 1024;
unsigned long variable_name_size = 512;
efi_char16_t *variable_name;
efi_status_t status;
efi_guid_t vendor_guid;
@ -389,12 +389,13 @@ int efivar_init(int (*func)(efi_char16_t *, efi_guid_t, unsigned long, void *),
goto free;
/*
* Per EFI spec, the maximum storage allocated for both
* the variable name and variable data is 1024 bytes.
* A small set of old UEFI implementations reject sizes
* above a certain threshold, the lowest seen in the wild
* is 512.
*/
do {
variable_name_size = 1024;
variable_name_size = 512;
status = efivar_get_next_variable(&variable_name_size,
variable_name,
@ -431,9 +432,13 @@ int efivar_init(int (*func)(efi_char16_t *, efi_guid_t, unsigned long, void *),
break;
case EFI_NOT_FOUND:
break;
case EFI_BUFFER_TOO_SMALL:
pr_warn("efivars: Variable name size exceeds maximum (%lu > 512)\n",
variable_name_size);
status = EFI_NOT_FOUND;
break;
default:
printk(KERN_WARNING "efivars: get_next_variable: status=%lx\n",
status);
pr_warn("efivars: get_next_variable: status=%lx\n", status);
status = EFI_NOT_FOUND;
break;
}

View File

@ -668,8 +668,10 @@ static int nfs_writepage_locked(struct folio *folio,
int err;
if (wbc->sync_mode == WB_SYNC_NONE &&
NFS_SERVER(inode)->write_congested)
NFS_SERVER(inode)->write_congested) {
folio_redirty_for_writepage(wbc, folio);
return AOP_WRITEPAGE_ACTIVATE;
}
nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGE);
nfs_pageio_init_write(&pgio, inode, 0, false,

View File

@ -2529,7 +2529,7 @@ static void smb2_new_xattrs(struct ksmbd_tree_connect *tcon, const struct path *
da.flags = XATTR_DOSINFO_ATTRIB | XATTR_DOSINFO_CREATE_TIME |
XATTR_DOSINFO_ITIME;
rc = ksmbd_vfs_set_dos_attrib_xattr(mnt_idmap(path->mnt), path, &da, false);
rc = ksmbd_vfs_set_dos_attrib_xattr(mnt_idmap(path->mnt), path, &da, true);
if (rc)
ksmbd_debug(SMB, "failed to store file attribute into xattr\n");
}
@ -3198,23 +3198,6 @@ int smb2_open(struct ksmbd_work *work)
goto err_out;
}
rc = ksmbd_vfs_getattr(&path, &stat);
if (rc)
goto err_out;
if (stat.result_mask & STATX_BTIME)
fp->create_time = ksmbd_UnixTimeToNT(stat.btime);
else
fp->create_time = ksmbd_UnixTimeToNT(stat.ctime);
if (req->FileAttributes || fp->f_ci->m_fattr == 0)
fp->f_ci->m_fattr =
cpu_to_le32(smb2_get_dos_mode(&stat, le32_to_cpu(req->FileAttributes)));
if (!created)
smb2_update_xattrs(tcon, &path, fp);
else
smb2_new_xattrs(tcon, &path, fp);
if (file_present || created)
ksmbd_vfs_kern_path_unlock(&parent_path, &path);
@ -3315,6 +3298,23 @@ int smb2_open(struct ksmbd_work *work)
}
}
rc = ksmbd_vfs_getattr(&path, &stat);
if (rc)
goto err_out1;
if (stat.result_mask & STATX_BTIME)
fp->create_time = ksmbd_UnixTimeToNT(stat.btime);
else
fp->create_time = ksmbd_UnixTimeToNT(stat.ctime);
if (req->FileAttributes || fp->f_ci->m_fattr == 0)
fp->f_ci->m_fattr =
cpu_to_le32(smb2_get_dos_mode(&stat, le32_to_cpu(req->FileAttributes)));
if (!created)
smb2_update_xattrs(tcon, &path, fp);
else
smb2_new_xattrs(tcon, &path, fp);
memcpy(fp->client_guid, conn->ClientGUID, SMB2_CLIENT_GUID_SIZE);
rsp->StructureSize = cpu_to_le16(89);

View File

@ -65,6 +65,7 @@ static void do_insert_old_idx(struct ubifs_info *c,
else {
ubifs_err(c, "old idx added twice!");
kfree(old_idx);
return;
}
}
rb_link_node(&old_idx->rb, parent, p);

View File

@ -83,7 +83,7 @@ struct bvec_iter {
unsigned int bi_bvec_done; /* number of bytes completed in
current bvec */
} __packed;
} __packed __aligned(4);
struct bvec_iter_all {
struct bio_vec bv;

View File

@ -464,6 +464,7 @@ struct nf_ct_hook {
const struct sk_buff *);
void (*attach)(struct sk_buff *nskb, const struct sk_buff *skb);
void (*set_closing)(struct nf_conntrack *nfct);
int (*confirm)(struct sk_buff *skb);
};
extern const struct nf_ct_hook __rcu *nf_ct_hook;

View File

@ -249,6 +249,7 @@ struct mctp_route {
struct mctp_route *mctp_route_lookup(struct net *net, unsigned int dnet,
mctp_eid_t daddr);
/* always takes ownership of skb */
int mctp_local_output(struct sock *sk, struct mctp_route *rt,
struct sk_buff *skb, mctp_eid_t daddr, u8 req_tag);

View File

@ -30,6 +30,8 @@ static inline void snd_soc_card_mutex_unlock(struct snd_soc_card *card)
struct snd_kcontrol *snd_soc_card_get_kcontrol(struct snd_soc_card *soc_card,
const char *name);
struct snd_kcontrol *snd_soc_card_get_kcontrol_locked(struct snd_soc_card *soc_card,
const char *name);
int snd_soc_card_jack_new(struct snd_soc_card *card, const char *id, int type,
struct snd_soc_jack *jack);
int snd_soc_card_jack_new_pins(struct snd_soc_card *card, const char *id,
@ -115,8 +117,8 @@ struct snd_soc_dai *snd_soc_card_get_codec_dai(struct snd_soc_card *card,
struct snd_soc_pcm_runtime *rtd;
for_each_card_rtds(card, rtd) {
if (!strcmp(asoc_rtd_to_codec(rtd, 0)->name, dai_name))
return asoc_rtd_to_codec(rtd, 0);
if (!strcmp(snd_soc_rtd_to_codec(rtd, 0)->name, dai_name))
return snd_soc_rtd_to_codec(rtd, 0);
}
return NULL;

View File

@ -774,37 +774,42 @@ struct snd_soc_dai_link {
#endif
};
/* REMOVE ME */
#define asoc_link_to_cpu snd_soc_link_to_cpu
#define asoc_link_to_codec snd_soc_link_to_codec
#define asoc_link_to_platform snd_soc_link_to_platform
static inline struct snd_soc_dai_link_component*
asoc_link_to_cpu(struct snd_soc_dai_link *link, int n) {
snd_soc_link_to_cpu(struct snd_soc_dai_link *link, int n) {
return &(link)->cpus[n];
}
static inline struct snd_soc_dai_link_component*
asoc_link_to_codec(struct snd_soc_dai_link *link, int n) {
snd_soc_link_to_codec(struct snd_soc_dai_link *link, int n) {
return &(link)->codecs[n];
}
static inline struct snd_soc_dai_link_component*
asoc_link_to_platform(struct snd_soc_dai_link *link, int n) {
snd_soc_link_to_platform(struct snd_soc_dai_link *link, int n) {
return &(link)->platforms[n];
}
#define for_each_link_codecs(link, i, codec) \
for ((i) = 0; \
((i) < link->num_codecs) && \
((codec) = asoc_link_to_codec(link, i)); \
((codec) = snd_soc_link_to_codec(link, i)); \
(i)++)
#define for_each_link_platforms(link, i, platform) \
for ((i) = 0; \
((i) < link->num_platforms) && \
((platform) = asoc_link_to_platform(link, i)); \
((platform) = snd_soc_link_to_platform(link, i)); \
(i)++)
#define for_each_link_cpus(link, i, cpu) \
for ((i) = 0; \
((i) < link->num_cpus) && \
((cpu) = asoc_link_to_cpu(link, i)); \
((cpu) = snd_soc_link_to_cpu(link, i)); \
(i)++)
/*
@ -894,8 +899,11 @@ asoc_link_to_platform(struct snd_soc_dai_link *link, int n) {
#define COMP_CODEC_CONF(_name) { .name = _name }
#define COMP_DUMMY() { .name = "snd-soc-dummy", .dai_name = "snd-soc-dummy-dai", }
/* REMOVE ME */
#define asoc_dummy_dlc snd_soc_dummy_dlc
extern struct snd_soc_dai_link_component null_dailink_component[0];
extern struct snd_soc_dai_link_component asoc_dummy_dlc;
extern struct snd_soc_dai_link_component snd_soc_dummy_dlc;
struct snd_soc_codec_conf {
@ -1113,8 +1121,8 @@ struct snd_soc_pcm_runtime {
* dais = cpu_dai + codec_dai
* see
* soc_new_pcm_runtime()
* asoc_rtd_to_cpu()
* asoc_rtd_to_codec()
* snd_soc_rtd_to_cpu()
* snd_soc_rtd_to_codec()
*/
struct snd_soc_dai **dais;
@ -1142,10 +1150,16 @@ struct snd_soc_pcm_runtime {
int num_components;
struct snd_soc_component *components[]; /* CPU/Codec/Platform */
};
/* REMOVE ME */
#define asoc_rtd_to_cpu snd_soc_rtd_to_cpu
#define asoc_rtd_to_codec snd_soc_rtd_to_codec
#define asoc_substream_to_rtd snd_soc_substream_to_rtd
/* see soc_new_pcm_runtime() */
#define asoc_rtd_to_cpu(rtd, n) (rtd)->dais[n]
#define asoc_rtd_to_codec(rtd, n) (rtd)->dais[n + (rtd)->dai_link->num_cpus]
#define asoc_substream_to_rtd(substream) \
#define snd_soc_rtd_to_cpu(rtd, n) (rtd)->dais[n]
#define snd_soc_rtd_to_codec(rtd, n) (rtd)->dais[n + (rtd)->dai_link->num_cpus]
#define snd_soc_substream_to_rtd(substream) \
(struct snd_soc_pcm_runtime *)snd_pcm_substream_chip(substream)
#define for_each_rtd_components(rtd, i, component) \
@ -1154,11 +1168,11 @@ struct snd_soc_pcm_runtime {
(i)++)
#define for_each_rtd_cpu_dais(rtd, i, dai) \
for ((i) = 0; \
((i) < rtd->dai_link->num_cpus) && ((dai) = asoc_rtd_to_cpu(rtd, i)); \
((i) < rtd->dai_link->num_cpus) && ((dai) = snd_soc_rtd_to_cpu(rtd, i)); \
(i)++)
#define for_each_rtd_codec_dais(rtd, i, dai) \
for ((i) = 0; \
((i) < rtd->dai_link->num_codecs) && ((dai) = asoc_rtd_to_codec(rtd, i)); \
((i) < rtd->dai_link->num_codecs) && ((dai) = snd_soc_rtd_to_codec(rtd, i)); \
(i)++)
#define for_each_rtd_dais(rtd, i, dai) \
for ((i) = 0; \

View File

@ -145,7 +145,7 @@ struct in6_flowlabel_req {
#define IPV6_TLV_PADN 1
#define IPV6_TLV_ROUTERALERT 5
#define IPV6_TLV_CALIPSO 7 /* RFC 5570 */
#define IPV6_TLV_IOAM 49 /* TEMPORARY IANA allocation for IOAM */
#define IPV6_TLV_IOAM 49 /* RFC 9486 */
#define IPV6_TLV_JUMBO 194
#define IPV6_TLV_HAO 201 /* home address option */

View File

@ -30,6 +30,8 @@ static const u8 nla_attr_len[NLA_TYPE_MAX+1] = {
[NLA_S16] = sizeof(s16),
[NLA_S32] = sizeof(s32),
[NLA_S64] = sizeof(s64),
[NLA_BE16] = sizeof(__be16),
[NLA_BE32] = sizeof(__be32),
};
static const u8 nla_attr_minlen[NLA_TYPE_MAX+1] = {
@ -43,6 +45,8 @@ static const u8 nla_attr_minlen[NLA_TYPE_MAX+1] = {
[NLA_S16] = sizeof(s16),
[NLA_S32] = sizeof(s32),
[NLA_S64] = sizeof(s64),
[NLA_BE16] = sizeof(__be16),
[NLA_BE32] = sizeof(__be32),
};
/*

View File

@ -362,6 +362,12 @@ static void __init pud_advanced_tests(struct pgtable_debug_args *args)
vaddr &= HPAGE_PUD_MASK;
pud = pfn_pud(args->pud_pfn, args->page_prot);
/*
* Some architectures have debug checks to make sure
* huge pud mappings are only found with devmap entries.
* For now test with only devmap entries.
*/
pud = pud_mkdevmap(pud);
set_pud_at(args->mm, vaddr, args->pudp, pud);
flush_dcache_page(page);
pudp_set_wrprotect(args->mm, vaddr, args->pudp);
@ -374,6 +380,7 @@ static void __init pud_advanced_tests(struct pgtable_debug_args *args)
WARN_ON(!pud_none(pud));
#endif /* __PAGETABLE_PMD_FOLDED */
pud = pfn_pud(args->pud_pfn, args->page_prot);
pud = pud_mkdevmap(pud);
pud = pud_wrprotect(pud);
pud = pud_mkclean(pud);
set_pud_at(args->mm, vaddr, args->pudp, pud);
@ -391,6 +398,7 @@ static void __init pud_advanced_tests(struct pgtable_debug_args *args)
#endif /* __PAGETABLE_PMD_FOLDED */
pud = pfn_pud(args->pud_pfn, args->page_prot);
pud = pud_mkdevmap(pud);
pud = pud_mkyoung(pud);
set_pud_at(args->mm, vaddr, args->pudp, pud);
flush_dcache_page(page);

View File

@ -4159,28 +4159,40 @@ static void filemap_cachestat(struct address_space *mapping,
rcu_read_lock();
xas_for_each(&xas, folio, last_index) {
int order;
unsigned long nr_pages;
pgoff_t folio_first_index, folio_last_index;
/*
* Don't deref the folio. It is not pinned, and might
* get freed (and reused) underneath us.
*
* We *could* pin it, but that would be expensive for
* what should be a fast and lightweight syscall.
*
* Instead, derive all information of interest from
* the rcu-protected xarray.
*/
if (xas_retry(&xas, folio))
continue;
order = xa_get_order(xas.xa, xas.xa_index);
nr_pages = 1 << order;
folio_first_index = round_down(xas.xa_index, 1 << order);
folio_last_index = folio_first_index + nr_pages - 1;
/* Folios might straddle the range boundaries, only count covered pages */
if (folio_first_index < first_index)
nr_pages -= first_index - folio_first_index;
if (folio_last_index > last_index)
nr_pages -= folio_last_index - last_index;
if (xa_is_value(folio)) {
/* page is evicted */
void *shadow = (void *)folio;
bool workingset; /* not used */
int order = xa_get_order(xas.xa, xas.xa_index);
nr_pages = 1 << order;
folio_first_index = round_down(xas.xa_index, 1 << order);
folio_last_index = folio_first_index + nr_pages - 1;
/* Folios might straddle the range boundaries, only count covered pages */
if (folio_first_index < first_index)
nr_pages -= first_index - folio_first_index;
if (folio_last_index > last_index)
nr_pages -= folio_last_index - last_index;
cs->nr_evicted += nr_pages;
@ -4198,24 +4210,13 @@ static void filemap_cachestat(struct address_space *mapping,
goto resched;
}
nr_pages = folio_nr_pages(folio);
folio_first_index = folio_pgoff(folio);
folio_last_index = folio_first_index + nr_pages - 1;
/* Folios might straddle the range boundaries, only count covered pages */
if (folio_first_index < first_index)
nr_pages -= first_index - folio_first_index;
if (folio_last_index > last_index)
nr_pages -= folio_last_index - last_index;
/* page is in cache */
cs->nr_cache += nr_pages;
if (folio_test_dirty(folio))
if (xas_get_mark(&xas, PAGECACHE_TAG_DIRTY))
cs->nr_dirty += nr_pages;
if (folio_test_writeback(folio))
if (xas_get_mark(&xas, PAGECACHE_TAG_WRITEBACK))
cs->nr_writeback += nr_pages;
resched:
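The rework above computes, once per xarray entry, how many of a folio's pages actually fall inside the queried index range. A small hypothetical sketch of that clamping arithmetic; covered_pages() is an invented helper operating on plain integers rather than the xarray state.

#include <stdio.h>

/* How many pages of a folio of 2^order pages (whose first index is
 * xa_index rounded down to the folio size) fall inside [first, last]?
 */
static unsigned long covered_pages(unsigned long xa_index, unsigned int order,
                                   unsigned long first, unsigned long last)
{
        unsigned long nr = 1UL << order;
        unsigned long folio_first = xa_index & ~(nr - 1);   /* round_down() */
        unsigned long folio_last = folio_first + nr - 1;

        if (folio_first < first)
                nr -= first - folio_first;
        if (folio_last > last)
                nr -= folio_last - last;
        return nr;
}

int main(void)
{
        /* An order-2 (4-page) folio covering indices 8..11, query range 10..20:
         * only pages 10 and 11 are counted. */
        printf("covered: %lu\n", covered_pages(8, 2, 10, 20));
        return 0;
}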

View File

@ -1049,6 +1049,7 @@ static void hci_error_reset(struct work_struct *work)
{
struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);
hci_dev_hold(hdev);
BT_DBG("%s", hdev->name);
if (hdev->hw_error)
@ -1056,10 +1057,10 @@ static void hci_error_reset(struct work_struct *work)
else
bt_dev_err(hdev, "hardware error 0x%2.2x", hdev->hw_error_code);
if (hci_dev_do_close(hdev))
return;
if (!hci_dev_do_close(hdev))
hci_dev_do_open(hdev);
hci_dev_do_open(hdev);
hci_dev_put(hdev);
}
void hci_uuids_clear(struct hci_dev *hdev)

View File

@ -5329,9 +5329,12 @@ static void hci_io_capa_request_evt(struct hci_dev *hdev, void *data,
hci_dev_lock(hdev);
conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
if (!conn || !hci_conn_ssp_enabled(conn))
if (!conn || !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
goto unlock;
/* Assume remote supports SSP since it has triggered this event */
set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
hci_conn_hold(conn);
if (!hci_dev_test_flag(hdev, HCI_MGMT))
@ -6794,6 +6797,10 @@ static void hci_le_remote_conn_param_req_evt(struct hci_dev *hdev, void *data,
return send_conn_param_neg_reply(hdev, handle,
HCI_ERROR_UNKNOWN_CONN_ID);
if (max > hcon->le_conn_max_interval)
return send_conn_param_neg_reply(hdev, handle,
HCI_ERROR_INVALID_LL_PARAMS);
if (hci_check_conn_params(min, max, latency, timeout))
return send_conn_param_neg_reply(hdev, handle,
HCI_ERROR_INVALID_LL_PARAMS);
@ -7430,10 +7437,10 @@ static void hci_store_wake_reason(struct hci_dev *hdev, u8 event,
* keep track of the bdaddr of the connection event that woke us up.
*/
if (event == HCI_EV_CONN_REQUEST) {
bacpy(&hdev->wake_addr, &conn_complete->bdaddr);
bacpy(&hdev->wake_addr, &conn_request->bdaddr);
hdev->wake_addr_type = BDADDR_BREDR;
} else if (event == HCI_EV_CONN_COMPLETE) {
bacpy(&hdev->wake_addr, &conn_request->bdaddr);
bacpy(&hdev->wake_addr, &conn_complete->bdaddr);
hdev->wake_addr_type = BDADDR_BREDR;
} else if (event == HCI_EV_LE_META) {
struct hci_ev_le_meta *le_ev = (void *)skb->data;

View File

@ -2274,8 +2274,11 @@ static int hci_le_add_accept_list_sync(struct hci_dev *hdev,
/* During suspend, only wakeable devices can be in acceptlist */
if (hdev->suspended &&
!(params->flags & HCI_CONN_FLAG_REMOTE_WAKEUP))
!(params->flags & HCI_CONN_FLAG_REMOTE_WAKEUP)) {
hci_le_del_accept_list_sync(hdev, &params->addr,
params->addr_type);
return 0;
}
/* Select filter policy to accept all advertising */
if (*num_entries >= hdev->le_accept_list_size)
@ -5633,7 +5636,7 @@ static int hci_inquiry_sync(struct hci_dev *hdev, u8 length)
bt_dev_dbg(hdev, "");
if (hci_dev_test_flag(hdev, HCI_INQUIRY))
if (test_bit(HCI_INQUIRY, &hdev->flags))
return 0;
hci_dev_lock(hdev);

View File

@ -5613,7 +5613,13 @@ static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
memset(&rsp, 0, sizeof(rsp));
err = hci_check_conn_params(min, max, latency, to_multiplier);
if (max > hcon->le_conn_max_interval) {
BT_DBG("requested connection interval exceeds current bounds.");
err = -EINVAL;
} else {
err = hci_check_conn_params(min, max, latency, to_multiplier);
}
if (err)
rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
else

View File

@ -43,6 +43,10 @@
#include <linux/sysctl.h>
#endif
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
#include <net/netfilter/nf_conntrack_core.h>
#endif
static unsigned int brnf_net_id __read_mostly;
struct brnf_net {
@ -553,6 +557,90 @@ static unsigned int br_nf_pre_routing(void *priv,
return NF_STOLEN;
}
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
/* conntracks' nf_confirm logic cannot handle cloned skbs referencing
* the same nf_conn entry, which will happen for multicast (broadcast)
* frames on bridges.
*
* Example:
* macvlan0
* br0
* ethX ethY
*
* ethX (or Y) receives multicast or broadcast packet containing
* an IP packet, not yet in conntrack table.
*
* 1. skb passes through bridge and fake-ip (br_netfilter)Prerouting.
* -> skb->_nfct now references an unconfirmed entry
* 2. skb is broad/mcast packet. bridge now passes clones out on each bridge
* interface.
* 3. skb gets passed up the stack.
* 4. In macvlan case, macvlan driver retains clone(s) of the mcast skb
* and schedules a work queue to send them out on the lower devices.
*
* The clone skb->_nfct is not a copy, it is the same entry as the
* original skb. The macvlan rx handler then returns RX_HANDLER_PASS.
* 5. Normal conntrack hooks (in NF_INET_LOCAL_IN) confirm the orig skb.
*
* The Macvlan broadcast worker and normal confirm path will race.
*
* This race will not happen if step 2 already confirmed a clone. In that
* case later steps perform skb_clone() with skb->_nfct already confirmed (in
* hash table). This works fine.
*
* But such confirmation won't happen when eb/ip/nftables rules dropped the
* packets before they reached the nf_confirm step in postrouting.
*
* Work around this problem by explicit confirmation of the entry at
* LOCAL_IN time, before upper layer has a chance to clone the unconfirmed
* entry.
*
*/
static unsigned int br_nf_local_in(void *priv,
struct sk_buff *skb,
const struct nf_hook_state *state)
{
struct nf_conntrack *nfct = skb_nfct(skb);
const struct nf_ct_hook *ct_hook;
struct nf_conn *ct;
int ret;
if (!nfct || skb->pkt_type == PACKET_HOST)
return NF_ACCEPT;
ct = container_of(nfct, struct nf_conn, ct_general);
if (likely(nf_ct_is_confirmed(ct)))
return NF_ACCEPT;
WARN_ON_ONCE(skb_shared(skb));
WARN_ON_ONCE(refcount_read(&nfct->use) != 1);
/* We can't call nf_confirm here, it would create a dependency
* on nf_conntrack module.
*/
ct_hook = rcu_dereference(nf_ct_hook);
if (!ct_hook) {
skb->_nfct = 0ul;
nf_conntrack_put(nfct);
return NF_ACCEPT;
}
nf_bridge_pull_encap_header(skb);
ret = ct_hook->confirm(skb);
switch (ret & NF_VERDICT_MASK) {
case NF_STOLEN:
return NF_STOLEN;
default:
nf_bridge_push_encap_header(skb);
break;
}
ct = container_of(nfct, struct nf_conn, ct_general);
WARN_ON_ONCE(!nf_ct_is_confirmed(ct));
return ret;
}
#endif
/* PF_BRIDGE/FORWARD *************************************************/
static int br_nf_forward_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
@ -962,6 +1050,14 @@ static const struct nf_hook_ops br_nf_ops[] = {
.hooknum = NF_BR_PRE_ROUTING,
.priority = NF_BR_PRI_BRNF,
},
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
{
.hook = br_nf_local_in,
.pf = NFPROTO_BRIDGE,
.hooknum = NF_BR_LOCAL_IN,
.priority = NF_BR_PRI_LAST,
},
#endif
{
.hook = br_nf_forward_ip,
.pf = NFPROTO_BRIDGE,

View File

@ -291,6 +291,30 @@ static unsigned int nf_ct_bridge_pre(void *priv, struct sk_buff *skb,
return nf_conntrack_in(skb, &bridge_state);
}
static unsigned int nf_ct_bridge_in(void *priv, struct sk_buff *skb,
const struct nf_hook_state *state)
{
enum ip_conntrack_info ctinfo;
struct nf_conn *ct;
if (skb->pkt_type == PACKET_HOST)
return NF_ACCEPT;
/* nf_conntrack_confirm() cannot handle concurrent clones,
* this happens for broad/multicast frames with e.g. macvlan on top
* of the bridge device.
*/
ct = nf_ct_get(skb, &ctinfo);
if (!ct || nf_ct_is_confirmed(ct) || nf_ct_is_template(ct))
return NF_ACCEPT;
/* let inet prerouting call conntrack again */
skb->_nfct = 0;
nf_ct_put(ct);
return NF_ACCEPT;
}
static void nf_ct_bridge_frag_save(struct sk_buff *skb,
struct nf_bridge_frag_data *data)
{
@ -385,6 +409,12 @@ static struct nf_hook_ops nf_ct_bridge_hook_ops[] __read_mostly = {
.hooknum = NF_BR_PRE_ROUTING,
.priority = NF_IP_PRI_CONNTRACK,
},
{
.hook = nf_ct_bridge_in,
.pf = NFPROTO_BRIDGE,
.hooknum = NF_BR_LOCAL_IN,
.priority = NF_IP_PRI_CONNTRACK_CONFIRM,
},
{
.hook = nf_ct_bridge_post,
.pf = NFPROTO_BRIDGE,

View File

@ -5136,10 +5136,9 @@ static int rtnl_bridge_setlink(struct sk_buff *skb, struct nlmsghdr *nlh,
struct net *net = sock_net(skb->sk);
struct ifinfomsg *ifm;
struct net_device *dev;
struct nlattr *br_spec, *attr = NULL;
struct nlattr *br_spec, *attr, *br_flags_attr = NULL;
int rem, err = -EOPNOTSUPP;
u16 flags = 0;
bool have_flags = false;
if (nlmsg_len(nlh) < sizeof(*ifm))
return -EINVAL;
@ -5157,11 +5156,11 @@ static int rtnl_bridge_setlink(struct sk_buff *skb, struct nlmsghdr *nlh,
br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
if (br_spec) {
nla_for_each_nested(attr, br_spec, rem) {
if (nla_type(attr) == IFLA_BRIDGE_FLAGS && !have_flags) {
if (nla_type(attr) == IFLA_BRIDGE_FLAGS && !br_flags_attr) {
if (nla_len(attr) < sizeof(flags))
return -EINVAL;
have_flags = true;
br_flags_attr = attr;
flags = nla_get_u16(attr);
}
@ -5205,8 +5204,8 @@ static int rtnl_bridge_setlink(struct sk_buff *skb, struct nlmsghdr *nlh,
}
}
if (have_flags)
memcpy(nla_data(attr), &flags, sizeof(flags));
if (br_flags_attr)
memcpy(nla_data(br_flags_attr), &flags, sizeof(flags));
out:
return err;
}

View File

@ -83,7 +83,7 @@ static bool is_supervision_frame(struct hsr_priv *hsr, struct sk_buff *skb)
return false;
/* Get next tlv */
total_length += sizeof(struct hsr_sup_tlv) + hsr_sup_tag->tlv.HSR_TLV_length;
total_length += hsr_sup_tag->tlv.HSR_TLV_length;
if (!pskb_may_pull(skb, total_length))
return false;
skb_pull(skb, total_length);

View File

@ -554,6 +554,20 @@ static int tnl_update_pmtu(struct net_device *dev, struct sk_buff *skb,
return 0;
}
static void ip_tunnel_adj_headroom(struct net_device *dev, unsigned int headroom)
{
/* we must cap headroom to some upper limit, else pskb_expand_head
* will overflow header offsets in skb_headers_offset_update().
*/
static const unsigned int max_allowed = 512;
if (headroom > max_allowed)
headroom = max_allowed;
if (headroom > READ_ONCE(dev->needed_headroom))
WRITE_ONCE(dev->needed_headroom, headroom);
}
void ip_md_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
u8 proto, int tunnel_hlen)
{
@ -632,13 +646,13 @@ void ip_md_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
}
headroom += LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len;
if (headroom > READ_ONCE(dev->needed_headroom))
WRITE_ONCE(dev->needed_headroom, headroom);
if (skb_cow_head(skb, READ_ONCE(dev->needed_headroom))) {
if (skb_cow_head(skb, headroom)) {
ip_rt_put(rt);
goto tx_dropped;
}
ip_tunnel_adj_headroom(dev, headroom);
iptunnel_xmit(NULL, rt, skb, fl4.saddr, fl4.daddr, proto, tos, ttl,
df, !net_eq(tunnel->net, dev_net(dev)));
return;
@ -818,16 +832,16 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
max_headroom = LL_RESERVED_SPACE(rt->dst.dev) + sizeof(struct iphdr)
+ rt->dst.header_len + ip_encap_hlen(&tunnel->encap);
if (max_headroom > READ_ONCE(dev->needed_headroom))
WRITE_ONCE(dev->needed_headroom, max_headroom);
if (skb_cow_head(skb, READ_ONCE(dev->needed_headroom))) {
if (skb_cow_head(skb, max_headroom)) {
ip_rt_put(rt);
DEV_STATS_INC(dev, tx_dropped);
kfree_skb(skb);
return;
}
ip_tunnel_adj_headroom(dev, max_headroom);
iptunnel_xmit(NULL, rt, skb, fl4.saddr, fl4.daddr, protocol, tos, ttl,
df, !net_eq(tunnel->net, dev_net(dev)));
return;
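The helper added above both caps the requested headroom and only ever grows dev->needed_headroom. A minimal hypothetical sketch of that clamp-then-grow-only update, with a plain variable standing in for the net_device field.

#include <stdio.h>

#define MAX_ALLOWED_HEADROOM 512        /* same cap as the patch's max_allowed */

static unsigned int needed_headroom;    /* stand-in for dev->needed_headroom */

static void adj_headroom(unsigned int headroom)
{
        if (headroom > MAX_ALLOWED_HEADROOM)
                headroom = MAX_ALLOWED_HEADROOM;
        if (headroom > needed_headroom)         /* grow only, never shrink */
                needed_headroom = headroom;
}

int main(void)
{
        adj_headroom(100);
        adj_headroom(1000);     /* capped to 512 */
        adj_headroom(200);      /* smaller than the current value, ignored */
        printf("needed_headroom = %u\n", needed_headroom);      /* prints 512 */
        return 0;
}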

Some files were not shown because too many files have changed in this diff.