Merge tag 'v6.6.3' into lf-6.6.y

This is the 6.6.3 stable release

* tag 'v6.6.3': (526 commits)
  Linux 6.6.3
  drm/amd/display: Change the DMCUB mailbox memory location from FB to inbox
  drm/amd/display: Clear dpcd_sink_ext_caps if not set
  ...

Signed-off-by: Jason Liu <jason.hui.liu@nxp.com>

 Conflicts:
	arch/arm64/boot/dts/freescale/fsl-ls208xa.dtsi
	drivers/usb/dwc3/core.c
Author: Jason Liu <jason.hui.liu@nxp.com>
Date:   2023-11-29 15:04:49 -06:00
Commit: f75d905095
588 changed files with 5984 additions and 3312 deletions

View File

@ -5858,6 +5858,13 @@
This feature may be more efficiently disabled
using the csdlock_debug- kernel parameter.
smp.panic_on_ipistall= [KNL]
If a csd_lock_timeout extends for more than
the specified number of milliseconds, panic the
system. By default, CSD-lock acquisition is
allowed to take as long as it takes. Specifying 300,000
for this value provides a 5-minute timeout.
smsc-ircc2.nopnp [HW] Don't use PNP to discover SMC devices
smsc-ircc2.ircc_cfg= [HW] Device configuration I/O port
smsc-ircc2.ircc_sir= [HW] SIR base I/O port
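
To make the smp.panic_on_ipistall entry above concrete: it takes a value in milliseconds, and the text itself notes that 300,000 gives a five-minute limit. A minimal, purely illustrative boot command line (the pairing with smp.csd_lock_timeout is an assumption for the example, not a requirement) might look like:

	smp.csd_lock_timeout=300000 smp.panic_on_ipistall=300000

With these hypothetical values, the CSD-lock timeout warning would fire and, if the stall then persists past five minutes, the system would panic rather than wait indefinitely.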

View File

@ -32,6 +32,27 @@ properties:
vdd3-supply: true
qcom,tune-usb2-disc-thres:
$ref: /schemas/types.yaml#/definitions/uint8
description: High-Speed disconnect threshold
minimum: 0
maximum: 7
default: 0
qcom,tune-usb2-amplitude:
$ref: /schemas/types.yaml#/definitions/uint8
description: High-Speed transmit amplitude
minimum: 0
maximum: 15
default: 8
qcom,tune-usb2-preem:
$ref: /schemas/types.yaml#/definitions/uint8
description: High-Speed TX pre-emphasis tuning
minimum: 0
maximum: 7
default: 5
required:
- compatible
- reg
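
As a hedged illustration only, the three new tuning properties above would be set per-PHY in the device tree roughly as below; the node name, compatible string, and register window are placeholders rather than values taken from this binding, and the uint8 properties use /bits/ 8 notation:

	usb_hsphy: phy@7b000 {
		compatible = "qcom,ipq5332-usb-hsphy";	/* placeholder */
		reg = <0x0007b000 0x12c>;		/* placeholder */

		/* new knobs from this binding change, shown at their defaults */
		qcom,tune-usb2-disc-thres = /bits/ 8 <0>;	/* disconnect threshold, 0-7 */
		qcom,tune-usb2-amplitude = /bits/ 8 <8>;	/* HS transmit amplitude, 0-15 */
		qcom,tune-usb2-preem = /bits/ 8 <5>;		/* HS TX pre-emphasis, 0-7 */
	};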

View File

@ -96,7 +96,7 @@ then:
rts-gpios: false
patternProperties:
"^bluetooth|gnss|gps|mcu$":
"^(bluetooth|gnss|gps|mcu)$":
if:
type: object
then:

View File

@ -169,27 +169,27 @@ properties:
- const: tgib0
- const: tgic0
- const: tgid0
- const: tgiv0
- const: tciv0
- const: tgie0
- const: tgif0
- const: tgia1
- const: tgib1
- const: tgiv1
- const: tgiu1
- const: tciv1
- const: tciu1
- const: tgia2
- const: tgib2
- const: tgiv2
- const: tgiu2
- const: tciv2
- const: tciu2
- const: tgia3
- const: tgib3
- const: tgic3
- const: tgid3
- const: tgiv3
- const: tciv3
- const: tgia4
- const: tgib4
- const: tgic4
- const: tgid4
- const: tgiv4
- const: tciv4
- const: tgiu5
- const: tgiv5
- const: tgiw5
@ -197,18 +197,18 @@ properties:
- const: tgib6
- const: tgic6
- const: tgid6
- const: tgiv6
- const: tciv6
- const: tgia7
- const: tgib7
- const: tgic7
- const: tgid7
- const: tgiv7
- const: tciv7
- const: tgia8
- const: tgib8
- const: tgic8
- const: tgid8
- const: tgiv8
- const: tgiu8
- const: tciv8
- const: tciu8
clocks:
maxItems: 1
@ -285,16 +285,16 @@ examples:
<GIC_SPI 211 IRQ_TYPE_EDGE_RISING>,
<GIC_SPI 212 IRQ_TYPE_EDGE_RISING>,
<GIC_SPI 213 IRQ_TYPE_EDGE_RISING>;
interrupt-names = "tgia0", "tgib0", "tgic0", "tgid0", "tgiv0", "tgie0",
interrupt-names = "tgia0", "tgib0", "tgic0", "tgid0", "tciv0", "tgie0",
"tgif0",
"tgia1", "tgib1", "tgiv1", "tgiu1",
"tgia2", "tgib2", "tgiv2", "tgiu2",
"tgia3", "tgib3", "tgic3", "tgid3", "tgiv3",
"tgia4", "tgib4", "tgic4", "tgid4", "tgiv4",
"tgia1", "tgib1", "tciv1", "tciu1",
"tgia2", "tgib2", "tciv2", "tciu2",
"tgia3", "tgib3", "tgic3", "tgid3", "tciv3",
"tgia4", "tgib4", "tgic4", "tgid4", "tciv4",
"tgiu5", "tgiv5", "tgiw5",
"tgia6", "tgib6", "tgic6", "tgid6", "tgiv6",
"tgia7", "tgib7", "tgic7", "tgid7", "tgiv7",
"tgia8", "tgib8", "tgic8", "tgid8", "tgiv8", "tgiu8";
"tgia6", "tgib6", "tgic6", "tgid6", "tciv6",
"tgia7", "tgib7", "tgic7", "tgid7", "tciv7",
"tgia8", "tgib8", "tgic8", "tgid8", "tciv8", "tciu8";
clocks = <&cpg CPG_MOD R9A07G044_MTU_X_MCK_MTU3>;
power-domains = <&cpg>;
resets = <&cpg R9A07G044_MTU_X_PRESET_MTU3>;

View File

@ -47,6 +47,7 @@ Supported adapters:
* Intel Alder Lake (PCH)
* Intel Raptor Lake (PCH)
* Intel Meteor Lake (SOC and PCH)
* Intel Birch Stream (SOC)
Datasheets: Publicly available at the Intel website

View File

@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 6
PATCHLEVEL = 6
SUBLEVEL = 2
SUBLEVEL = 3
EXTRAVERSION =
NAME = Hurr durr I'ma ninja sloth

View File

@ -10,10 +10,6 @@
#include <linux/interrupt.h>
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
#define __exception_irq_entry __irq_entry
#else
#define __exception_irq_entry
#endif
#endif /* __ASM_ARM_EXCEPTION_H */

View File

@ -1368,6 +1368,8 @@ choice
config CPU_BIG_ENDIAN
bool "Build big-endian kernel"
depends on !LD_IS_LLD || LLD_VERSION >= 130000
# https://github.com/llvm/llvm-project/commit/1379b150991f70a5782e9a143c2ba5308da1161c
depends on AS_IS_GNU || AS_VERSION >= 150000
help
Say Y if you plan on running a kernel with a big-endian userspace.

View File

@ -135,7 +135,7 @@
reg = <0x0 0x4a800000 0x0 0x100000>;
no-map;
hwlocks = <&tcsr_mutex 0>;
hwlocks = <&tcsr_mutex 3>;
};
};

View File

@ -211,7 +211,7 @@
smem {
compatible = "qcom,smem";
memory-region = <&smem_region>;
hwlocks = <&tcsr_mutex 0>;
hwlocks = <&tcsr_mutex 3>;
};
soc: soc@0 {
@ -393,7 +393,7 @@
tcsr_mutex: hwlock@1905000 {
compatible = "qcom,ipq6018-tcsr-mutex", "qcom,tcsr-mutex";
reg = <0x0 0x01905000 0x0 0x1000>;
reg = <0x0 0x01905000 0x0 0x20000>;
#hwlock-cells = <1>;
};

View File

@ -101,7 +101,7 @@
reg = <0x0 0x4ab00000 0x0 0x100000>;
no-map;
hwlocks = <&tcsr_mutex 0>;
hwlocks = <&tcsr_mutex 3>;
};
memory@4ac00000 {

View File

@ -195,7 +195,7 @@
smem@4aa00000 {
compatible = "qcom,smem";
reg = <0x0 0x4aa00000 0x0 0x100000>;
hwlocks = <&tcsr_mutex 0>;
hwlocks = <&tcsr_mutex 3>;
no-map;
};
};

View File

@ -137,6 +137,18 @@
vin-supply = <&vcc5v0_sys>;
};
vcc3v3_pcie2x1l0: vcc3v3-pcie2x1l0-regulator {
compatible = "regulator-fixed";
enable-active-high;
gpio = <&gpio4 RK_PC2 GPIO_ACTIVE_HIGH>;
pinctrl-names = "default";
pinctrl-0 = <&pcie_m2_1_pwren>;
regulator-name = "vcc3v3_pcie2x1l0";
regulator-min-microvolt = <3300000>;
regulator-max-microvolt = <3300000>;
vin-supply = <&vcc5v0_sys>;
};
vcc3v3_pcie30: vcc3v3-pcie30-regulator {
compatible = "regulator-fixed";
enable-active-high;
@ -421,6 +433,14 @@
status = "okay";
};
&pcie2x1l1 {
reset-gpios = <&gpio4 RK_PA2 GPIO_ACTIVE_HIGH>;
vpcie3v3-supply = <&vcc3v3_pcie2x1l0>;
pinctrl-names = "default";
pinctrl-0 = <&pcie2_1_rst>;
status = "okay";
};
&pcie2x1l2 {
reset-gpios = <&gpio4 RK_PA4 GPIO_ACTIVE_HIGH>;
vpcie3v3-supply = <&vcc_3v3_pcie20>;
@ -467,6 +487,10 @@
rockchip,pins = <4 RK_PB3 RK_FUNC_GPIO &pcfg_pull_none>;
};
pcie2_1_rst: pcie2-1-rst {
rockchip,pins = <4 RK_PA2 RK_FUNC_GPIO &pcfg_pull_none>;
};
pcie2_2_rst: pcie2-2-rst {
rockchip,pins = <4 RK_PA4 RK_FUNC_GPIO &pcfg_pull_none>;
};
@ -474,6 +498,10 @@
pcie_m2_0_pwren: pcie-m20-pwren {
rockchip,pins = <2 RK_PC5 RK_FUNC_GPIO &pcfg_pull_none>;
};
pcie_m2_1_pwren: pcie-m21-pwren {
rockchip,pins = <4 RK_PC2 RK_FUNC_GPIO &pcfg_pull_none>;
};
};
usb {

View File

@ -167,9 +167,6 @@ static unsigned int count_plts(Elf64_Sym *syms, Elf64_Rela *rela, int num,
switch (ELF64_R_TYPE(rela[i].r_info)) {
case R_AARCH64_JUMP26:
case R_AARCH64_CALL26:
if (!IS_ENABLED(CONFIG_RANDOMIZE_BASE))
break;
/*
* We only have to consider branch targets that resolve
* to symbols that are defined in a different section.
@ -269,9 +266,6 @@ static int partition_branch_plt_relas(Elf64_Sym *syms, Elf64_Rela *rela,
{
int i = 0, j = numrels - 1;
if (!IS_ENABLED(CONFIG_RANDOMIZE_BASE))
return 0;
while (i < j) {
if (branch_rela_needs_plt(syms, &rela[i], dstidx))
i++;

View File

@ -32,7 +32,7 @@ static inline void set_my_cpu_offset(unsigned long off)
#define __my_cpu_offset __my_cpu_offset
#define PERCPU_OP(op, asm_op, c_op) \
static inline unsigned long __percpu_##op(void *ptr, \
static __always_inline unsigned long __percpu_##op(void *ptr, \
unsigned long val, int size) \
{ \
unsigned long ret; \
@ -63,7 +63,7 @@ PERCPU_OP(and, and, &)
PERCPU_OP(or, or, |)
#undef PERCPU_OP
static inline unsigned long __percpu_read(void *ptr, int size)
static __always_inline unsigned long __percpu_read(void *ptr, int size)
{
unsigned long ret;
@ -100,7 +100,7 @@ static inline unsigned long __percpu_read(void *ptr, int size)
return ret;
}
static inline void __percpu_write(void *ptr, unsigned long val, int size)
static __always_inline void __percpu_write(void *ptr, unsigned long val, int size)
{
switch (size) {
case 1:
@ -132,8 +132,8 @@ static inline void __percpu_write(void *ptr, unsigned long val, int size)
}
}
static inline unsigned long __percpu_xchg(void *ptr, unsigned long val,
int size)
static __always_inline unsigned long __percpu_xchg(void *ptr, unsigned long val,
int size)
{
switch (size) {
case 1:

View File

@ -117,7 +117,7 @@ void __flush_dcache_pages(struct page *page, unsigned int nr)
* get faulted into the tlb (and thus flushed) anyways.
*/
for (i = 0; i < nr; i++) {
addr = (unsigned long)kmap_local_page(page + i);
addr = (unsigned long)kmap_local_page(nth_page(page, i));
flush_data_cache_page(addr);
kunmap_local((void *)addr);
}

View File

@ -138,11 +138,11 @@ config ARCH_MMAP_RND_COMPAT_BITS_MIN
default 8
config ARCH_MMAP_RND_BITS_MAX
default 24 if 64BIT
default 17
default 18 if 64BIT
default 13
config ARCH_MMAP_RND_COMPAT_BITS_MAX
default 17
default 13
# unless you want to implement ACPI on PA-RISC ... ;-)
config PM

View File

@ -349,15 +349,7 @@ struct pt_regs; /* forward declaration... */
#define ELF_HWCAP 0
/* Masks for stack and mmap randomization */
#define BRK_RND_MASK (is_32bit_task() ? 0x07ffUL : 0x3ffffUL)
#define MMAP_RND_MASK (is_32bit_task() ? 0x1fffUL : 0x3ffffUL)
#define STACK_RND_MASK MMAP_RND_MASK
struct mm_struct;
extern unsigned long arch_randomize_brk(struct mm_struct *);
#define arch_randomize_brk arch_randomize_brk
#define STACK_RND_MASK 0x7ff /* 8MB of VA */
#define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
struct linux_binprm;

View File

@ -47,6 +47,8 @@
#ifndef __ASSEMBLY__
struct rlimit;
unsigned long mmap_upper_limit(struct rlimit *rlim_stack);
unsigned long calc_max_stack_size(unsigned long stack_max);
/*

View File

@ -472,6 +472,7 @@ struct pdc_model { /* for PDC_MODEL */
unsigned long arch_rev;
unsigned long pot_key;
unsigned long curr_key;
unsigned long width; /* default of PSW_W bit (1=enabled) */
};
struct pdc_cache_cf { /* for PDC_CACHE (I/D-caches) */

View File

@ -36,6 +36,24 @@
.level 2.0
#endif
/*
* We need seven instructions after a TLB insert for it to take effect.
* The PA8800/PA8900 processors are an exception and need 12 instructions.
* The RFI changes both IAOQ_Back and IAOQ_Front, so it counts as one.
*/
#ifdef CONFIG_64BIT
#define NUM_PIPELINE_INSNS 12
#else
#define NUM_PIPELINE_INSNS 7
#endif
/* Insert num nops */
.macro insert_nops num
.rept \num
nop
.endr
.endm
/* Get aligned page_table_lock address for this mm from cr28/tr4 */
.macro get_ptl reg
mfctl %cr28,\reg
@ -415,24 +433,20 @@
3:
.endm
/* Release page_table_lock without reloading lock address.
We use an ordered store to ensure all prior accesses are
performed prior to releasing the lock. */
.macro ptl_unlock0 spc,tmp,tmp2
/* Release page_table_lock if for user space. We use an ordered
store to ensure all prior accesses are performed prior to
releasing the lock. Note stw may not be executed, so we
provide one extra nop when CONFIG_TLB_PTLOCK is defined. */
.macro ptl_unlock spc,tmp,tmp2
#ifdef CONFIG_TLB_PTLOCK
98: ldi __ARCH_SPIN_LOCK_UNLOCKED_VAL, \tmp2
98: get_ptl \tmp
ldi __ARCH_SPIN_LOCK_UNLOCKED_VAL, \tmp2
or,COND(=) %r0,\spc,%r0
stw,ma \tmp2,0(\tmp)
99: ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
#endif
.endm
/* Release page_table_lock. */
.macro ptl_unlock1 spc,tmp,tmp2
#ifdef CONFIG_TLB_PTLOCK
98: get_ptl \tmp
ptl_unlock0 \spc,\tmp,\tmp2
99: ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
insert_nops NUM_PIPELINE_INSNS - 4
#else
insert_nops NUM_PIPELINE_INSNS - 1
#endif
.endm
@ -461,13 +475,13 @@
* to a CPU TLB 4k PFN (4k => 12 bits to shift) */
#define PAGE_ADD_SHIFT (PAGE_SHIFT-12)
#define PAGE_ADD_HUGE_SHIFT (REAL_HPAGE_SHIFT-12)
#define PFN_START_BIT (63-ASM_PFN_PTE_SHIFT+(63-58)-PAGE_ADD_SHIFT)
/* Drop prot bits and convert to page addr for iitlbt and idtlbt */
.macro convert_for_tlb_insert20 pte,tmp
#ifdef CONFIG_HUGETLB_PAGE
copy \pte,\tmp
extrd,u \tmp,(63-ASM_PFN_PTE_SHIFT)+(63-58)+PAGE_ADD_SHIFT,\
64-PAGE_SHIFT-PAGE_ADD_SHIFT,\pte
extrd,u \tmp,PFN_START_BIT,PFN_START_BIT+1,\pte
depdi _PAGE_SIZE_ENCODING_DEFAULT,63,\
(63-58)+PAGE_ADD_SHIFT,\pte
@ -475,8 +489,7 @@
depdi _HUGE_PAGE_SIZE_ENCODING_DEFAULT,63,\
(63-58)+PAGE_ADD_HUGE_SHIFT,\pte
#else /* Huge pages disabled */
extrd,u \pte,(63-ASM_PFN_PTE_SHIFT)+(63-58)+PAGE_ADD_SHIFT,\
64-PAGE_SHIFT-PAGE_ADD_SHIFT,\pte
extrd,u \pte,PFN_START_BIT,PFN_START_BIT+1,\pte
depdi _PAGE_SIZE_ENCODING_DEFAULT,63,\
(63-58)+PAGE_ADD_SHIFT,\pte
#endif
@ -1124,7 +1137,7 @@ dtlb_miss_20w:
idtlbt pte,prot
ptl_unlock1 spc,t0,t1
ptl_unlock spc,t0,t1
rfir
nop
@ -1133,6 +1146,7 @@ dtlb_check_alias_20w:
idtlbt pte,prot
insert_nops NUM_PIPELINE_INSNS - 1
rfir
nop
@ -1150,7 +1164,7 @@ nadtlb_miss_20w:
idtlbt pte,prot
ptl_unlock1 spc,t0,t1
ptl_unlock spc,t0,t1
rfir
nop
@ -1159,6 +1173,7 @@ nadtlb_check_alias_20w:
idtlbt pte,prot
insert_nops NUM_PIPELINE_INSNS - 1
rfir
nop
@ -1184,7 +1199,7 @@ dtlb_miss_11:
mtsp t1, %sr1 /* Restore sr1 */
ptl_unlock1 spc,t0,t1
ptl_unlock spc,t0,t1
rfir
nop
@ -1194,6 +1209,7 @@ dtlb_check_alias_11:
idtlba pte,(va)
idtlbp prot,(va)
insert_nops NUM_PIPELINE_INSNS - 1
rfir
nop
@ -1217,7 +1233,7 @@ nadtlb_miss_11:
mtsp t1, %sr1 /* Restore sr1 */
ptl_unlock1 spc,t0,t1
ptl_unlock spc,t0,t1
rfir
nop
@ -1227,6 +1243,7 @@ nadtlb_check_alias_11:
idtlba pte,(va)
idtlbp prot,(va)
insert_nops NUM_PIPELINE_INSNS - 1
rfir
nop
@ -1246,7 +1263,7 @@ dtlb_miss_20:
idtlbt pte,prot
ptl_unlock1 spc,t0,t1
ptl_unlock spc,t0,t1
rfir
nop
@ -1255,6 +1272,7 @@ dtlb_check_alias_20:
idtlbt pte,prot
insert_nops NUM_PIPELINE_INSNS - 1
rfir
nop
@ -1274,7 +1292,7 @@ nadtlb_miss_20:
idtlbt pte,prot
ptl_unlock1 spc,t0,t1
ptl_unlock spc,t0,t1
rfir
nop
@ -1283,6 +1301,7 @@ nadtlb_check_alias_20:
idtlbt pte,prot
insert_nops NUM_PIPELINE_INSNS - 1
rfir
nop
@ -1319,7 +1338,7 @@ itlb_miss_20w:
iitlbt pte,prot
ptl_unlock1 spc,t0,t1
ptl_unlock spc,t0,t1
rfir
nop
@ -1343,7 +1362,7 @@ naitlb_miss_20w:
iitlbt pte,prot
ptl_unlock1 spc,t0,t1
ptl_unlock spc,t0,t1
rfir
nop
@ -1352,6 +1371,7 @@ naitlb_check_alias_20w:
iitlbt pte,prot
insert_nops NUM_PIPELINE_INSNS - 1
rfir
nop
@ -1377,7 +1397,7 @@ itlb_miss_11:
mtsp t1, %sr1 /* Restore sr1 */
ptl_unlock1 spc,t0,t1
ptl_unlock spc,t0,t1
rfir
nop
@ -1401,7 +1421,7 @@ naitlb_miss_11:
mtsp t1, %sr1 /* Restore sr1 */
ptl_unlock1 spc,t0,t1
ptl_unlock spc,t0,t1
rfir
nop
@ -1411,6 +1431,7 @@ naitlb_check_alias_11:
iitlba pte,(%sr0, va)
iitlbp prot,(%sr0, va)
insert_nops NUM_PIPELINE_INSNS - 1
rfir
nop
@ -1431,7 +1452,7 @@ itlb_miss_20:
iitlbt pte,prot
ptl_unlock1 spc,t0,t1
ptl_unlock spc,t0,t1
rfir
nop
@ -1451,7 +1472,7 @@ naitlb_miss_20:
iitlbt pte,prot
ptl_unlock1 spc,t0,t1
ptl_unlock spc,t0,t1
rfir
nop
@ -1460,6 +1481,7 @@ naitlb_check_alias_20:
iitlbt pte,prot
insert_nops NUM_PIPELINE_INSNS - 1
rfir
nop
@ -1481,7 +1503,7 @@ dbit_trap_20w:
idtlbt pte,prot
ptl_unlock0 spc,t0,t1
ptl_unlock spc,t0,t1
rfir
nop
#else
@ -1507,7 +1529,7 @@ dbit_trap_11:
mtsp t1, %sr1 /* Restore sr1 */
ptl_unlock0 spc,t0,t1
ptl_unlock spc,t0,t1
rfir
nop
@ -1527,7 +1549,7 @@ dbit_trap_20:
idtlbt pte,prot
ptl_unlock0 spc,t0,t1
ptl_unlock spc,t0,t1
rfir
nop
#endif

View File

@ -70,9 +70,8 @@ $bss_loop:
stw,ma %arg2,4(%r1)
stw,ma %arg3,4(%r1)
#if !defined(CONFIG_64BIT) && defined(CONFIG_PA20)
/* This 32-bit kernel was compiled for PA2.0 CPUs. Check current CPU
* and halt kernel if we detect a PA1.x CPU. */
#if defined(CONFIG_PA20)
/* check for 64-bit capable CPU as required by current kernel */
ldi 32,%r10
mtctl %r10,%cr11
.level 2.0

View File

@ -77,7 +77,7 @@ unsigned long calc_max_stack_size(unsigned long stack_max)
* indicating that "current" should be used instead of a passed-in
* value from the exec bprm as done with arch_pick_mmap_layout().
*/
static unsigned long mmap_upper_limit(struct rlimit *rlim_stack)
unsigned long mmap_upper_limit(struct rlimit *rlim_stack)
{
unsigned long stack_base;

View File

@ -1371,8 +1371,7 @@ static void power_pmu_disable(struct pmu *pmu)
/*
* Disable instruction sampling if it was enabled
*/
if (cpuhw->mmcr.mmcra & MMCRA_SAMPLE_ENABLE)
val &= ~MMCRA_SAMPLE_ENABLE;
val &= ~MMCRA_SAMPLE_ENABLE;
/* Disable BHRB via mmcra (BHRBRD) for p10 */
if (ppmu->flags & PPMU_ARCH_31)
@ -1383,7 +1382,7 @@ static void power_pmu_disable(struct pmu *pmu)
* instruction sampling or BHRB.
*/
if (val != mmcra) {
mtspr(SPRN_MMCRA, mmcra);
mtspr(SPRN_MMCRA, val);
mb();
isync();
}

View File

@ -25,7 +25,6 @@ DECLARE_DO_ERROR_INFO(do_trap_ecall_s);
DECLARE_DO_ERROR_INFO(do_trap_ecall_m);
DECLARE_DO_ERROR_INFO(do_trap_break);
asmlinkage unsigned long get_overflow_stack(void);
asmlinkage void handle_bad_stack(struct pt_regs *regs);
asmlinkage void do_page_fault(struct pt_regs *regs);
asmlinkage void do_irq(struct pt_regs *regs);

View File

@ -82,6 +82,28 @@
.endr
.endm
#ifdef CONFIG_SMP
#ifdef CONFIG_32BIT
#define PER_CPU_OFFSET_SHIFT 2
#else
#define PER_CPU_OFFSET_SHIFT 3
#endif
.macro asm_per_cpu dst sym tmp
REG_L \tmp, TASK_TI_CPU_NUM(tp)
slli \tmp, \tmp, PER_CPU_OFFSET_SHIFT
la \dst, __per_cpu_offset
add \dst, \dst, \tmp
REG_L \tmp, 0(\dst)
la \dst, \sym
add \dst, \dst, \tmp
.endm
#else /* CONFIG_SMP */
.macro asm_per_cpu dst sym tmp
la \dst, \sym
.endm
#endif /* CONFIG_SMP */
/* save all GPs except x1 ~ x5 */
.macro save_from_x6_to_x31
REG_S x6, PT_T1(sp)

View File

@ -10,4 +10,9 @@
#define RISCV_HWPROBE_MAX_KEY 5
static inline bool riscv_hwprobe_key_is_valid(__s64 key)
{
return key >= 0 && key <= RISCV_HWPROBE_MAX_KEY;
}
#endif

View File

@ -33,8 +33,8 @@
#define PAGE_OFFSET _AC(CONFIG_PAGE_OFFSET, UL)
#endif
/*
* By default, CONFIG_PAGE_OFFSET value corresponds to SV48 address space so
* define the PAGE_OFFSET value for SV39.
* By default, CONFIG_PAGE_OFFSET value corresponds to SV57 address space so
* define the PAGE_OFFSET value for SV48 and SV39.
*/
#define PAGE_OFFSET_L4 _AC(0xffffaf8000000000, UL)
#define PAGE_OFFSET_L3 _AC(0xffffffd800000000, UL)

View File

@ -34,9 +34,6 @@
#ifndef __ASSEMBLY__
extern long shadow_stack[SHADOW_OVERFLOW_STACK_SIZE / sizeof(long)];
extern unsigned long spin_shadow_stack;
#include <asm/processor.h>
#include <asm/csr.h>

View File

@ -14,7 +14,7 @@ static inline void cpu_relax(void)
__asm__ __volatile__ ("div %0, %0, zero" : "=r" (dummy));
#endif
#ifdef __riscv_zihintpause
#ifdef CONFIG_TOOLCHAIN_HAS_ZIHINTPAUSE
/*
* Reduce instruction retirement.
* This assumes the PC changes.

View File

@ -39,6 +39,7 @@ void asm_offsets(void)
OFFSET(TASK_TI_KERNEL_SP, task_struct, thread_info.kernel_sp);
OFFSET(TASK_TI_USER_SP, task_struct, thread_info.user_sp);
OFFSET(TASK_TI_CPU_NUM, task_struct, thread_info.cpu);
OFFSET(TASK_THREAD_F0, task_struct, thread.fstate.f[0]);
OFFSET(TASK_THREAD_F1, task_struct, thread.fstate.f[1]);
OFFSET(TASK_THREAD_F2, task_struct, thread.fstate.f[2]);

View File

@ -10,9 +10,13 @@
#include <asm/asm.h>
#include <asm/csr.h>
#include <asm/unistd.h>
#include <asm/page.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
#include <asm/errata_list.h>
#include <linux/sizes.h>
.section .irqentry.text, "ax"
SYM_CODE_START(handle_exception)
/*
@ -170,67 +174,15 @@ SYM_CODE_END(ret_from_exception)
#ifdef CONFIG_VMAP_STACK
SYM_CODE_START_LOCAL(handle_kernel_stack_overflow)
/*
* Takes the pseudo-spinlock for the shadow stack, in case multiple
* harts are concurrently overflowing their kernel stacks. We could
* store any value here, but since we're overflowing the kernel stack
* already we only have SP to use as a scratch register. So we just
* swap in the address of the spinlock, as that's definitely non-zero.
*
* Pairs with a store_release in handle_bad_stack().
*/
1: la sp, spin_shadow_stack
REG_AMOSWAP_AQ sp, sp, (sp)
bnez sp, 1b
/* we reach here from kernel context, sscratch must be 0 */
csrrw x31, CSR_SCRATCH, x31
asm_per_cpu sp, overflow_stack, x31
li x31, OVERFLOW_STACK_SIZE
add sp, sp, x31
/* zero out x31 again and restore x31 */
xor x31, x31, x31
csrrw x31, CSR_SCRATCH, x31
la sp, shadow_stack
addi sp, sp, SHADOW_OVERFLOW_STACK_SIZE
//save caller register to shadow stack
addi sp, sp, -(PT_SIZE_ON_STACK)
REG_S x1, PT_RA(sp)
REG_S x5, PT_T0(sp)
REG_S x6, PT_T1(sp)
REG_S x7, PT_T2(sp)
REG_S x10, PT_A0(sp)
REG_S x11, PT_A1(sp)
REG_S x12, PT_A2(sp)
REG_S x13, PT_A3(sp)
REG_S x14, PT_A4(sp)
REG_S x15, PT_A5(sp)
REG_S x16, PT_A6(sp)
REG_S x17, PT_A7(sp)
REG_S x28, PT_T3(sp)
REG_S x29, PT_T4(sp)
REG_S x30, PT_T5(sp)
REG_S x31, PT_T6(sp)
la ra, restore_caller_reg
tail get_overflow_stack
restore_caller_reg:
//save per-cpu overflow stack
REG_S a0, -8(sp)
//restore caller register from shadow_stack
REG_L x1, PT_RA(sp)
REG_L x5, PT_T0(sp)
REG_L x6, PT_T1(sp)
REG_L x7, PT_T2(sp)
REG_L x10, PT_A0(sp)
REG_L x11, PT_A1(sp)
REG_L x12, PT_A2(sp)
REG_L x13, PT_A3(sp)
REG_L x14, PT_A4(sp)
REG_L x15, PT_A5(sp)
REG_L x16, PT_A6(sp)
REG_L x17, PT_A7(sp)
REG_L x28, PT_T3(sp)
REG_L x29, PT_T4(sp)
REG_L x30, PT_T5(sp)
REG_L x31, PT_T6(sp)
//load per-cpu overflow stack
REG_L sp, -8(sp)
addi sp, sp, -(PT_SIZE_ON_STACK)
//save context to overflow stack

View File

@ -24,7 +24,7 @@ static inline bool rv_insn_reg_set_val(struct pt_regs *regs, u32 index,
unsigned long val)
{
if (index == 0)
return false;
return true;
else if (index <= 31)
*((unsigned long *)regs + index) = val;
else

View File

@ -3,6 +3,7 @@
#include <linux/highmem.h>
#include <linux/ptrace.h>
#include <linux/uprobes.h>
#include <asm/insn.h>
#include "decode-insn.h"
@ -17,6 +18,11 @@ bool is_swbp_insn(uprobe_opcode_t *insn)
#endif
}
bool is_trap_insn(uprobe_opcode_t *insn)
{
return riscv_insn_is_ebreak(*insn) || riscv_insn_is_c_ebreak(*insn);
}
unsigned long uprobe_get_swbp_addr(struct pt_regs *regs)
{
return instruction_pointer(regs);

View File

@ -410,48 +410,14 @@ int is_valid_bugaddr(unsigned long pc)
#endif /* CONFIG_GENERIC_BUG */
#ifdef CONFIG_VMAP_STACK
/*
* Extra stack space that allows us to provide panic messages when the kernel
* has overflowed its stack.
*/
static DEFINE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)],
DEFINE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)],
overflow_stack)__aligned(16);
/*
* A temporary stack for use by handle_kernel_stack_overflow. This is used so
* we can call into C code to get the per-hart overflow stack. Usage of this
* stack must be protected by spin_shadow_stack.
*/
long shadow_stack[SHADOW_OVERFLOW_STACK_SIZE/sizeof(long)] __aligned(16);
/*
* A pseudo spinlock to protect the shadow stack from being used by multiple
* harts concurrently. This isn't a real spinlock because the lock side must
* be taken without a valid stack and only a single register, it's only taken
* while in the process of panicking anyway so the performance and error
* checking a proper spinlock gives us doesn't matter.
*/
unsigned long spin_shadow_stack;
asmlinkage unsigned long get_overflow_stack(void)
{
return (unsigned long)this_cpu_ptr(overflow_stack) +
OVERFLOW_STACK_SIZE;
}
asmlinkage void handle_bad_stack(struct pt_regs *regs)
{
unsigned long tsk_stk = (unsigned long)current->stack;
unsigned long ovf_stk = (unsigned long)this_cpu_ptr(overflow_stack);
/*
* We're done with the shadow stack by this point, as we're on the
* overflow stack. Tell any other concurrent overflowing harts that
* they can proceed with panicking by releasing the pseudo-spinlock.
*
* This pairs with an amoswap.aq in handle_kernel_stack_overflow.
*/
smp_store_release(&spin_shadow_stack, 0);
console_verbose();
pr_emerg("Insufficient stack space to handle exception!\n");

View File

@ -37,7 +37,7 @@ int __vdso_riscv_hwprobe(struct riscv_hwprobe *pairs, size_t pair_count,
/* This is something we can handle, fill out the pairs. */
while (p < end) {
if (p->key <= RISCV_HWPROBE_MAX_KEY) {
if (riscv_hwprobe_key_is_valid(p->key)) {
p->value = avd->all_cpu_hwprobe_values[p->key];
} else {

View File

@ -36,3 +36,4 @@ endif
obj-$(CONFIG_DEBUG_VIRTUAL) += physaddr.o
obj-$(CONFIG_RISCV_DMA_NONCOHERENT) += dma-noncoherent.o
obj-$(CONFIG_RISCV_NONSTANDARD_CACHE_OPS) += cache-ops.o

arch/riscv/mm/cache-ops.c (new file, 17 lines)
View File

@ -0,0 +1,17 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2021 Western Digital Corporation or its affiliates.
*/
#include <asm/dma-noncoherent.h>
struct riscv_nonstd_cache_ops noncoherent_cache_ops __ro_after_init;
void
riscv_noncoherent_register_cache_ops(const struct riscv_nonstd_cache_ops *ops)
{
if (!ops)
return;
noncoherent_cache_ops = *ops;
}
EXPORT_SYMBOL_GPL(riscv_noncoherent_register_cache_ops);

View File

@ -15,12 +15,6 @@ static bool noncoherent_supported __ro_after_init;
int dma_cache_alignment __ro_after_init = ARCH_DMA_MINALIGN;
EXPORT_SYMBOL_GPL(dma_cache_alignment);
struct riscv_nonstd_cache_ops noncoherent_cache_ops __ro_after_init = {
.wback = NULL,
.inv = NULL,
.wback_inv = NULL,
};
static inline void arch_dma_cache_wback(phys_addr_t paddr, size_t size)
{
void *vaddr = phys_to_virt(paddr);
@ -162,12 +156,3 @@ void __init riscv_set_dma_cache_alignment(void)
if (!noncoherent_supported)
dma_cache_alignment = 1;
}
void riscv_noncoherent_register_cache_ops(const struct riscv_nonstd_cache_ops *ops)
{
if (!ops)
return;
noncoherent_cache_ops = *ops;
}
EXPORT_SYMBOL_GPL(riscv_noncoherent_register_cache_ops);

View File

@ -384,6 +384,9 @@ static int __init ptdump_init(void)
kernel_ptd_info.base_addr = KERN_VIRT_START;
pg_level[1].name = pgtable_l5_enabled ? "P4D" : "PGD";
pg_level[2].name = pgtable_l4_enabled ? "PUD" : "PGD";
for (i = 0; i < ARRAY_SIZE(pg_level); i++)
for (j = 0; j < ARRAY_SIZE(pte_bits); j++)
pg_level[i].mask |= pte_bits[j].mask;

View File

@ -21,10 +21,22 @@
#include <asm/pgalloc.h>
#include <asm/gmap.h>
#include <asm/page.h>
#include <asm/tlb.h>
#define GMAP_SHADOW_FAKE_TABLE 1ULL
static struct page *gmap_alloc_crst(void)
{
struct page *page;
page = alloc_pages(GFP_KERNEL_ACCOUNT, CRST_ALLOC_ORDER);
if (!page)
return NULL;
arch_set_page_dat(page, CRST_ALLOC_ORDER);
return page;
}
/**
* gmap_alloc - allocate and initialize a guest address space
* @limit: maximum address of the gmap address space
@ -67,7 +79,7 @@ static struct gmap *gmap_alloc(unsigned long limit)
spin_lock_init(&gmap->guest_table_lock);
spin_lock_init(&gmap->shadow_lock);
refcount_set(&gmap->ref_count, 1);
page = alloc_pages(GFP_KERNEL_ACCOUNT, CRST_ALLOC_ORDER);
page = gmap_alloc_crst();
if (!page)
goto out_free;
page->index = 0;
@ -308,7 +320,7 @@ static int gmap_alloc_table(struct gmap *gmap, unsigned long *table,
unsigned long *new;
/* since we don't free the gmap table until gmap_free we can unlock */
page = alloc_pages(GFP_KERNEL_ACCOUNT, CRST_ALLOC_ORDER);
page = gmap_alloc_crst();
if (!page)
return -ENOMEM;
new = page_to_virt(page);
@ -1759,7 +1771,7 @@ int gmap_shadow_r2t(struct gmap *sg, unsigned long saddr, unsigned long r2t,
BUG_ON(!gmap_is_shadow(sg));
/* Allocate a shadow region second table */
page = alloc_pages(GFP_KERNEL_ACCOUNT, CRST_ALLOC_ORDER);
page = gmap_alloc_crst();
if (!page)
return -ENOMEM;
page->index = r2t & _REGION_ENTRY_ORIGIN;
@ -1843,7 +1855,7 @@ int gmap_shadow_r3t(struct gmap *sg, unsigned long saddr, unsigned long r3t,
BUG_ON(!gmap_is_shadow(sg));
/* Allocate a shadow region second table */
page = alloc_pages(GFP_KERNEL_ACCOUNT, CRST_ALLOC_ORDER);
page = gmap_alloc_crst();
if (!page)
return -ENOMEM;
page->index = r3t & _REGION_ENTRY_ORIGIN;
@ -1927,7 +1939,7 @@ int gmap_shadow_sgt(struct gmap *sg, unsigned long saddr, unsigned long sgt,
BUG_ON(!gmap_is_shadow(sg) || (sgt & _REGION3_ENTRY_LARGE));
/* Allocate a shadow segment table */
page = alloc_pages(GFP_KERNEL_ACCOUNT, CRST_ALLOC_ORDER);
page = gmap_alloc_crst();
if (!page)
return -ENOMEM;
page->index = sgt & _REGION_ENTRY_ORIGIN;
@ -2855,7 +2867,7 @@ int s390_replace_asce(struct gmap *gmap)
if ((gmap->asce & _ASCE_TYPE_MASK) == _ASCE_TYPE_SEGMENT)
return -EINVAL;
page = alloc_pages(GFP_KERNEL_ACCOUNT, CRST_ALLOC_ORDER);
page = gmap_alloc_crst();
if (!page)
return -ENOMEM;
page->index = 0;

View File

@ -121,7 +121,7 @@ static void mark_kernel_pud(p4d_t *p4d, unsigned long addr, unsigned long end)
continue;
if (!pud_folded(*pud)) {
page = phys_to_page(pud_val(*pud));
for (i = 0; i < 3; i++)
for (i = 0; i < 4; i++)
set_bit(PG_arch_1, &page[i].flags);
}
mark_kernel_pmd(pud, addr, next);
@ -142,7 +142,7 @@ static void mark_kernel_p4d(pgd_t *pgd, unsigned long addr, unsigned long end)
continue;
if (!p4d_folded(*p4d)) {
page = phys_to_page(p4d_val(*p4d));
for (i = 0; i < 3; i++)
for (i = 0; i < 4; i++)
set_bit(PG_arch_1, &page[i].flags);
}
mark_kernel_pud(p4d, addr, next);
@ -164,7 +164,7 @@ static void mark_kernel_pgd(void)
continue;
if (!pgd_folded(*pgd)) {
page = phys_to_page(pgd_val(*pgd));
for (i = 0; i < 3; i++)
for (i = 0; i < 4; i++)
set_bit(PG_arch_1, &page[i].flags);
}
mark_kernel_p4d(pgd, addr, next);

View File

@ -146,6 +146,7 @@ struct page *page_table_alloc_pgste(struct mm_struct *mm)
ptdesc = pagetable_alloc(GFP_KERNEL, 0);
if (ptdesc) {
table = (u64 *)ptdesc_to_virt(ptdesc);
arch_set_page_dat(virt_to_page(table), 0);
memset64(table, _PAGE_INVALID, PTRS_PER_PTE);
memset64(table + PTRS_PER_PTE, 0, PTRS_PER_PTE);
}

View File

@ -12,6 +12,7 @@
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include <asm/page-states.h>
#include <asm/cacheflush.h>
#include <asm/nospec-branch.h>
#include <asm/pgalloc.h>
@ -45,8 +46,11 @@ void *vmem_crst_alloc(unsigned long val)
unsigned long *table;
table = vmem_alloc_pages(CRST_ALLOC_ORDER);
if (table)
crst_table_init(table, val);
if (!table)
return NULL;
crst_table_init(table, val);
if (slab_is_available())
arch_set_page_dat(virt_to_page(table), CRST_ALLOC_ORDER);
return table;
}

View File

@ -24,8 +24,17 @@
#include <linux/types.h>
#include <crypto/sha1.h>
#include <crypto/sha1_base.h>
#include <asm/cpu_device_id.h>
#include <asm/simd.h>
static const struct x86_cpu_id module_cpu_ids[] = {
X86_MATCH_FEATURE(X86_FEATURE_AVX2, NULL),
X86_MATCH_FEATURE(X86_FEATURE_AVX, NULL),
X86_MATCH_FEATURE(X86_FEATURE_SSSE3, NULL),
{}
};
MODULE_DEVICE_TABLE(x86cpu, module_cpu_ids);
static int sha1_update(struct shash_desc *desc, const u8 *data,
unsigned int len, sha1_block_fn *sha1_xform)
{
@ -301,6 +310,9 @@ static inline void unregister_sha1_ni(void) { }
static int __init sha1_ssse3_mod_init(void)
{
if (!x86_match_cpu(module_cpu_ids))
return -ENODEV;
if (register_sha1_ssse3())
goto fail;

View File

@ -38,11 +38,20 @@
#include <crypto/sha2.h>
#include <crypto/sha256_base.h>
#include <linux/string.h>
#include <asm/cpu_device_id.h>
#include <asm/simd.h>
asmlinkage void sha256_transform_ssse3(struct sha256_state *state,
const u8 *data, int blocks);
static const struct x86_cpu_id module_cpu_ids[] = {
X86_MATCH_FEATURE(X86_FEATURE_AVX2, NULL),
X86_MATCH_FEATURE(X86_FEATURE_AVX, NULL),
X86_MATCH_FEATURE(X86_FEATURE_SSSE3, NULL),
{}
};
MODULE_DEVICE_TABLE(x86cpu, module_cpu_ids);
static int _sha256_update(struct shash_desc *desc, const u8 *data,
unsigned int len, sha256_block_fn *sha256_xform)
{
@ -366,6 +375,9 @@ static inline void unregister_sha256_ni(void) { }
static int __init sha256_ssse3_mod_init(void)
{
if (!x86_match_cpu(module_cpu_ids))
return -ENODEV;
if (register_sha256_ssse3())
goto fail;

View File

@ -16,6 +16,9 @@
#include <asm/x86_init.h>
#include <asm/cpufeature.h>
#include <asm/irq_vectors.h>
#include <asm/xen/hypervisor.h>
#include <xen/xen.h>
#ifdef CONFIG_ACPI_APEI
# include <asm/pgtable_types.h>
@ -127,6 +130,17 @@ static inline void arch_acpi_set_proc_cap_bits(u32 *cap)
if (!cpu_has(c, X86_FEATURE_MWAIT) ||
boot_option_idle_override == IDLE_NOMWAIT)
*cap &= ~(ACPI_PROC_CAP_C_C1_FFH | ACPI_PROC_CAP_C_C2C3_FFH);
if (xen_initial_domain()) {
/*
* When Linux is running as Xen dom0, the hypervisor is the
* entity in charge of the processor power management, and so
* Xen needs to check that the OS capabilities reported in the
* processor capabilities buffer match what the hypervisor
* driver supports.
*/
xen_sanitize_proc_cap_bits(cap);
}
}
static inline bool acpi_has_cpu_in_madt(void)

View File

@ -108,6 +108,7 @@ KVM_X86_OP_OPTIONAL(vcpu_blocking)
KVM_X86_OP_OPTIONAL(vcpu_unblocking)
KVM_X86_OP_OPTIONAL(pi_update_irte)
KVM_X86_OP_OPTIONAL(pi_start_assignment)
KVM_X86_OP_OPTIONAL(apicv_pre_state_restore)
KVM_X86_OP_OPTIONAL(apicv_post_state_restore)
KVM_X86_OP_OPTIONAL_RET0(dy_apicv_has_pending_interrupt)
KVM_X86_OP_OPTIONAL(set_hv_timer)

View File

@ -1708,6 +1708,7 @@ struct kvm_x86_ops {
int (*pi_update_irte)(struct kvm *kvm, unsigned int host_irq,
uint32_t guest_irq, bool set);
void (*pi_start_assignment)(struct kvm *kvm);
void (*apicv_pre_state_restore)(struct kvm_vcpu *vcpu);
void (*apicv_post_state_restore)(struct kvm_vcpu *vcpu);
bool (*dy_apicv_has_pending_interrupt)(struct kvm_vcpu *vcpu);

View File

@ -553,6 +553,7 @@
#define MSR_AMD64_CPUID_FN_1 0xc0011004
#define MSR_AMD64_LS_CFG 0xc0011020
#define MSR_AMD64_DC_CFG 0xc0011022
#define MSR_AMD64_TW_CFG 0xc0011023
#define MSR_AMD64_DE_CFG 0xc0011029
#define MSR_AMD64_DE_CFG_LFENCE_SERIALIZE_BIT 1

View File

@ -12,13 +12,6 @@
#define NR_NODE_MEMBLKS (MAX_NUMNODES*2)
/*
* Too small node sizes may confuse the VM badly. Usually they
result from BIOS bugs. So don't recognize nodes as standalone
* NUMA entities that have less than this amount of RAM listed:
*/
#define NODE_MIN_SIZE (4*1024*1024)
extern int numa_off;
/*

View File

@ -100,4 +100,13 @@ static inline void leave_lazy(enum xen_lazy_mode mode)
enum xen_lazy_mode xen_get_lazy_mode(void);
#if defined(CONFIG_XEN_DOM0) && defined(CONFIG_ACPI)
void xen_sanitize_proc_cap_bits(uint32_t *buf);
#else
static inline void xen_sanitize_proc_cap_bits(uint32_t *buf)
{
BUG();
}
#endif
#endif /* _ASM_X86_XEN_HYPERVISOR_H */

View File

@ -55,14 +55,14 @@ msi_set_affinity(struct irq_data *irqd, const struct cpumask *mask, bool force)
* caused by the non-atomic update of the address/data pair.
*
* Direct update is possible when:
* - The MSI is maskable (remapped MSI does not use this code path)).
* The quirk bit is not set in this case.
* - The MSI is maskable (remapped MSI does not use this code path).
* The reservation mode bit is set in this case.
* - The new vector is the same as the old vector
* - The old vector is MANAGED_IRQ_SHUTDOWN_VECTOR (interrupt starts up)
* - The interrupt is not yet started up
* - The new destination CPU is the same as the old destination CPU
*/
if (!irqd_msi_nomask_quirk(irqd) ||
if (!irqd_can_reserve(irqd) ||
cfg->vector == old_cfg.vector ||
old_cfg.vector == MANAGED_IRQ_SHUTDOWN_VECTOR ||
!irqd_is_started(irqd) ||
@ -215,8 +215,6 @@ static bool x86_init_dev_msi_info(struct device *dev, struct irq_domain *domain,
if (WARN_ON_ONCE(domain != real_parent))
return false;
info->chip->irq_set_affinity = msi_set_affinity;
/* See msi_set_affinity() for the gory details */
info->flags |= MSI_FLAG_NOMASK_QUIRK;
break;
case DOMAIN_BUS_DMAR:
case DOMAIN_BUS_AMDVI:

View File

@ -2422,10 +2422,8 @@ static void __init srso_select_mitigation(void)
setup_force_cpu_cap(X86_FEATURE_SRSO_NO);
return;
}
}
if (retbleed_mitigation == RETBLEED_MITIGATION_IBPB) {
if (has_microcode) {
if (retbleed_mitigation == RETBLEED_MITIGATION_IBPB) {
srso_mitigation = SRSO_MITIGATION_IBPB;
goto out;
}

View File

@ -87,8 +87,12 @@ static void hygon_get_topology(struct cpuinfo_x86 *c)
if (!err)
c->x86_coreid_bits = get_count_order(c->x86_max_cores);
/* Socket ID is ApicId[6] for these processors. */
c->phys_proc_id = c->apicid >> APICID_SOCKET_ID_BIT;
/*
* Socket ID is ApicId[6] for the processors with model <= 0x3
* when running on host.
*/
if (!boot_cpu_has(X86_FEATURE_HYPERVISOR) && c->x86_model <= 0x3)
c->phys_proc_id = c->apicid >> APICID_SOCKET_ID_BIT;
cacheinfo_hygon_init_llc_id(c, cpu);
} else if (cpu_has(c, X86_FEATURE_NODEID_MSR)) {

View File

@ -175,9 +175,6 @@ int x64_setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs)
frame = get_sigframe(ksig, regs, sizeof(struct rt_sigframe), &fp);
uc_flags = frame_uc_flags(regs);
if (setup_signal_shadow_stack(ksig))
return -EFAULT;
if (!user_access_begin(frame, sizeof(*frame)))
return -EFAULT;
@ -198,6 +195,9 @@ int x64_setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs)
return -EFAULT;
}
if (setup_signal_shadow_stack(ksig))
return -EFAULT;
/* Set up registers for signal handler */
regs->di = ksig->sig;
/* In case the signal handler was declared without prototypes */

View File

@ -727,10 +727,12 @@ static int stimer_set_count(struct kvm_vcpu_hv_stimer *stimer, u64 count,
stimer_cleanup(stimer);
stimer->count = count;
if (stimer->count == 0)
stimer->config.enable = 0;
else if (stimer->config.auto_enable)
stimer->config.enable = 1;
if (!host) {
if (stimer->count == 0)
stimer->config.enable = 0;
else if (stimer->config.auto_enable)
stimer->config.enable = 1;
}
if (stimer->config.enable)
stimer_mark_pending(stimer, false);

View File

@ -2444,22 +2444,22 @@ EXPORT_SYMBOL_GPL(kvm_lapic_set_eoi);
void kvm_apic_write_nodecode(struct kvm_vcpu *vcpu, u32 offset)
{
struct kvm_lapic *apic = vcpu->arch.apic;
u64 val;
/*
* ICR is a single 64-bit register when x2APIC is enabled. For legacy
* xAPIC, ICR writes need to go down the common (slightly slower) path
* to get the upper half from ICR2.
* ICR is a single 64-bit register when x2APIC is enabled; all other
* registers hold 32-bit values. For legacy xAPIC, ICR writes need to
* go down the common path to get the upper half from ICR2.
*
* Note, using the write helpers may incur an unnecessary write to the
* virtual APIC state, but KVM needs to conditionally modify the value
* in certain cases, e.g. to clear the ICR busy bit. The cost of extra
* conditional branches is likely a wash relative to the cost of the
* maybe-unnecessary write, and both are in the noise anyway.
*/
if (apic_x2apic_mode(apic) && offset == APIC_ICR) {
val = kvm_lapic_get_reg64(apic, APIC_ICR);
kvm_apic_send_ipi(apic, (u32)val, (u32)(val >> 32));
trace_kvm_apic_write(APIC_ICR, val);
} else {
/* TODO: optimize to just emulate side effect w/o one more write */
val = kvm_lapic_get_reg(apic, offset);
kvm_lapic_reg_write(apic, offset, (u32)val);
}
if (apic_x2apic_mode(apic) && offset == APIC_ICR)
kvm_x2apic_icr_write(apic, kvm_lapic_get_reg64(apic, APIC_ICR));
else
kvm_lapic_reg_write(apic, offset, kvm_lapic_get_reg(apic, offset));
}
EXPORT_SYMBOL_GPL(kvm_apic_write_nodecode);
@ -2670,6 +2670,8 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event)
u64 msr_val;
int i;
static_call_cond(kvm_x86_apicv_pre_state_restore)(vcpu);
if (!init_event) {
msr_val = APIC_DEFAULT_PHYS_BASE | MSR_IA32_APICBASE_ENABLE;
if (kvm_vcpu_is_reset_bsp(vcpu))
@ -2981,6 +2983,8 @@ int kvm_apic_set_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s)
struct kvm_lapic *apic = vcpu->arch.apic;
int r;
static_call_cond(kvm_x86_apicv_pre_state_restore)(vcpu);
kvm_lapic_set_base(vcpu, vcpu->arch.apic_base);
/* set SPIV separately to get count of SW disabled APICs right */
apic_set_spiv(apic, *((u32 *)(s->regs + APIC_SPIV)));

View File

@ -6912,7 +6912,7 @@ static void vmx_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap)
vmcs_write64(EOI_EXIT_BITMAP3, eoi_exit_bitmap[3]);
}
static void vmx_apicv_post_state_restore(struct kvm_vcpu *vcpu)
static void vmx_apicv_pre_state_restore(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
@ -8286,7 +8286,7 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = {
.set_apic_access_page_addr = vmx_set_apic_access_page_addr,
.refresh_apicv_exec_ctrl = vmx_refresh_apicv_exec_ctrl,
.load_eoi_exitmap = vmx_load_eoi_exitmap,
.apicv_post_state_restore = vmx_apicv_post_state_restore,
.apicv_pre_state_restore = vmx_apicv_pre_state_restore,
.required_apicv_inhibits = VMX_REQUIRED_APICV_INHIBITS,
.hwapic_irr_update = vmx_hwapic_irr_update,
.hwapic_isr_update = vmx_hwapic_isr_update,

View File

@ -3641,6 +3641,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
case MSR_AMD64_PATCH_LOADER:
case MSR_AMD64_BU_CFG2:
case MSR_AMD64_DC_CFG:
case MSR_AMD64_TW_CFG:
case MSR_F15H_EX_CFG:
break;
@ -4065,6 +4066,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
case MSR_AMD64_BU_CFG2:
case MSR_IA32_PERF_CTL:
case MSR_AMD64_DC_CFG:
case MSR_AMD64_TW_CFG:
case MSR_F15H_EX_CFG:
/*
* Intel Sandy Bridge CPUs must support the RAPL (running average power

View File

@ -602,13 +602,6 @@ static int __init numa_register_memblks(struct numa_meminfo *mi)
if (start >= end)
continue;
/*
* Don't confuse VM with a node that doesn't have the
* minimum amount of memory:
*/
if (end && (end - start) < NODE_MIN_SIZE)
continue;
alloc_node_data(nid);
}

View File

@ -3,9 +3,11 @@
* Exceptions for specific devices. Usually work-arounds for fatal design flaws.
*/
#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/dmi.h>
#include <linux/pci.h>
#include <linux/suspend.h>
#include <linux/vgaarb.h>
#include <asm/amd_nb.h>
#include <asm/hpet.h>
@ -904,3 +906,60 @@ static void chromeos_fixup_apl_pci_l1ss_capability(struct pci_dev *dev)
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x5ad6, chromeos_save_apl_pci_l1ss_capability);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, 0x5ad6, chromeos_fixup_apl_pci_l1ss_capability);
#ifdef CONFIG_SUSPEND
/*
* Root Ports on some AMD SoCs advertise PME_Support for D3hot and D3cold, but
* if the SoC is put into a hardware sleep state by the amd-pmc driver, the
* Root Ports don't generate wakeup interrupts for USB devices.
*
* When suspending, remove D3hot and D3cold from the PME_Support advertised
* by the Root Port so we don't use those states if we're expecting wakeup
* interrupts. Restore the advertised PME_Support when resuming.
*/
static void amd_rp_pme_suspend(struct pci_dev *dev)
{
struct pci_dev *rp;
/*
* PM_SUSPEND_ON means we're doing runtime suspend, which means
* amd-pmc will not be involved so PMEs during D3 work as advertised.
*
* The PMEs *do* work if amd-pmc doesn't put the SoC in the hardware
* sleep state, but we assume amd-pmc is always present.
*/
if (pm_suspend_target_state == PM_SUSPEND_ON)
return;
rp = pcie_find_root_port(dev);
if (!rp->pm_cap)
return;
rp->pme_support &= ~((PCI_PM_CAP_PME_D3hot|PCI_PM_CAP_PME_D3cold) >>
PCI_PM_CAP_PME_SHIFT);
dev_info_once(&rp->dev, "quirk: disabling D3cold for suspend\n");
}
static void amd_rp_pme_resume(struct pci_dev *dev)
{
struct pci_dev *rp;
u16 pmc;
rp = pcie_find_root_port(dev);
if (!rp->pm_cap)
return;
pci_read_config_word(rp, rp->pm_cap + PCI_PM_PMC, &pmc);
rp->pme_support = FIELD_GET(PCI_PM_CAP_PME_MASK, pmc);
}
/* Rembrandt (yellow_carp) */
DECLARE_PCI_FIXUP_SUSPEND(PCI_VENDOR_ID_AMD, 0x162e, amd_rp_pme_suspend);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, 0x162e, amd_rp_pme_resume);
DECLARE_PCI_FIXUP_SUSPEND(PCI_VENDOR_ID_AMD, 0x162f, amd_rp_pme_suspend);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, 0x162f, amd_rp_pme_resume);
/* Phoenix (pink_sardine) */
DECLARE_PCI_FIXUP_SUSPEND(PCI_VENDOR_ID_AMD, 0x1668, amd_rp_pme_suspend);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, 0x1668, amd_rp_pme_resume);
DECLARE_PCI_FIXUP_SUSPEND(PCI_VENDOR_ID_AMD, 0x1669, amd_rp_pme_suspend);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, 0x1669, amd_rp_pme_resume);
#endif /* CONFIG_SUSPEND */

View File

@ -2875,11 +2875,8 @@ static struct request *blk_mq_get_new_requests(struct request_queue *q,
};
struct request *rq;
if (unlikely(bio_queue_enter(bio)))
return NULL;
if (blk_mq_attempt_bio_merge(q, bio, nsegs))
goto queue_exit;
return NULL;
rq_qos_throttle(q, bio);
@ -2895,35 +2892,23 @@ static struct request *blk_mq_get_new_requests(struct request_queue *q,
rq_qos_cleanup(q, bio);
if (bio->bi_opf & REQ_NOWAIT)
bio_wouldblock_error(bio);
queue_exit:
blk_queue_exit(q);
return NULL;
}
static inline struct request *blk_mq_get_cached_request(struct request_queue *q,
struct blk_plug *plug, struct bio **bio, unsigned int nsegs)
/* return true if this @rq can be used for @bio */
static bool blk_mq_can_use_cached_rq(struct request *rq, struct blk_plug *plug,
struct bio *bio)
{
struct request *rq;
enum hctx_type type, hctx_type;
enum hctx_type type = blk_mq_get_hctx_type(bio->bi_opf);
enum hctx_type hctx_type = rq->mq_hctx->type;
if (!plug)
return NULL;
rq = rq_list_peek(&plug->cached_rq);
if (!rq || rq->q != q)
return NULL;
WARN_ON_ONCE(rq_list_peek(&plug->cached_rq) != rq);
if (blk_mq_attempt_bio_merge(q, *bio, nsegs)) {
*bio = NULL;
return NULL;
}
type = blk_mq_get_hctx_type((*bio)->bi_opf);
hctx_type = rq->mq_hctx->type;
if (type != hctx_type &&
!(type == HCTX_TYPE_READ && hctx_type == HCTX_TYPE_DEFAULT))
return NULL;
if (op_is_flush(rq->cmd_flags) != op_is_flush((*bio)->bi_opf))
return NULL;
return false;
if (op_is_flush(rq->cmd_flags) != op_is_flush(bio->bi_opf))
return false;
/*
* If any qos ->throttle() end up blocking, we will have flushed the
@ -2931,12 +2916,12 @@ static inline struct request *blk_mq_get_cached_request(struct request_queue *q,
* before we throttle.
*/
plug->cached_rq = rq_list_next(rq);
rq_qos_throttle(q, *bio);
rq_qos_throttle(rq->q, bio);
blk_mq_rq_time_init(rq, 0);
rq->cmd_flags = (*bio)->bi_opf;
rq->cmd_flags = bio->bi_opf;
INIT_LIST_HEAD(&rq->queuelist);
return rq;
return true;
}
static void bio_set_ioprio(struct bio *bio)
@ -2966,7 +2951,7 @@ void blk_mq_submit_bio(struct bio *bio)
struct blk_plug *plug = blk_mq_plug(bio);
const int is_sync = op_is_sync(bio->bi_opf);
struct blk_mq_hw_ctx *hctx;
struct request *rq;
struct request *rq = NULL;
unsigned int nr_segs = 1;
blk_status_t ret;
@ -2977,20 +2962,36 @@ void blk_mq_submit_bio(struct bio *bio)
return;
}
if (!bio_integrity_prep(bio))
return;
bio_set_ioprio(bio);
rq = blk_mq_get_cached_request(q, plug, &bio, nr_segs);
if (!rq) {
if (!bio)
if (plug) {
rq = rq_list_peek(&plug->cached_rq);
if (rq && rq->q != q)
rq = NULL;
}
if (rq) {
if (!bio_integrity_prep(bio))
return;
rq = blk_mq_get_new_requests(q, plug, bio, nr_segs);
if (unlikely(!rq))
if (blk_mq_attempt_bio_merge(q, bio, nr_segs))
return;
if (blk_mq_can_use_cached_rq(rq, plug, bio))
goto done;
percpu_ref_get(&q->q_usage_counter);
} else {
if (unlikely(bio_queue_enter(bio)))
return;
if (!bio_integrity_prep(bio))
goto fail;
}
rq = blk_mq_get_new_requests(q, plug, bio, nr_segs);
if (unlikely(!rq)) {
fail:
blk_queue_exit(q);
return;
}
done:
trace_block_getrq(bio);
rq_qos_track(q, rq, bio);

View File

@ -117,6 +117,8 @@ static int pcrypt_aead_encrypt(struct aead_request *req)
err = padata_do_parallel(ictx->psenc, padata, &ctx->cb_cpu);
if (!err)
return -EINPROGRESS;
if (err == -EBUSY)
return -EAGAIN;
return err;
}
@ -164,6 +166,8 @@ static int pcrypt_aead_decrypt(struct aead_request *req)
err = padata_do_parallel(ictx->psdec, padata, &ctx->cb_cpu);
if (!err)
return -EINPROGRESS;
if (err == -EBUSY)
return -EAGAIN;
return err;
}

View File

@ -194,12 +194,19 @@ static int fpdt_process_subtable(u64 address, u32 subtable_type)
record_header = (void *)subtable_header + offset;
offset += record_header->length;
if (!record_header->length) {
pr_err(FW_BUG "Zero-length record found in FPTD.\n");
result = -EINVAL;
goto err;
}
switch (record_header->type) {
case RECORD_S3_RESUME:
if (subtable_type != SUBTABLE_S3PT) {
pr_err(FW_BUG "Invalid record %d for subtable %s\n",
record_header->type, signature);
return -EINVAL;
result = -EINVAL;
goto err;
}
if (record_resume) {
pr_err("Duplicate resume performance record found.\n");
@ -208,7 +215,7 @@ static int fpdt_process_subtable(u64 address, u32 subtable_type)
record_resume = (struct resume_performance_record *)record_header;
result = sysfs_create_group(fpdt_kobj, &resume_attr_group);
if (result)
return result;
goto err;
break;
case RECORD_S3_SUSPEND:
if (subtable_type != SUBTABLE_S3PT) {
@ -223,13 +230,14 @@ static int fpdt_process_subtable(u64 address, u32 subtable_type)
record_suspend = (struct suspend_performance_record *)record_header;
result = sysfs_create_group(fpdt_kobj, &suspend_attr_group);
if (result)
return result;
goto err;
break;
case RECORD_BOOT:
if (subtable_type != SUBTABLE_FBPT) {
pr_err(FW_BUG "Invalid %d for subtable %s\n",
record_header->type, signature);
return -EINVAL;
result = -EINVAL;
goto err;
}
if (record_boot) {
pr_err("Duplicate boot performance record found.\n");
@ -238,7 +246,7 @@ static int fpdt_process_subtable(u64 address, u32 subtable_type)
record_boot = (struct boot_performance_record *)record_header;
result = sysfs_create_group(fpdt_kobj, &boot_attr_group);
if (result)
return result;
goto err;
break;
default:
@ -247,6 +255,18 @@ static int fpdt_process_subtable(u64 address, u32 subtable_type)
}
}
return 0;
err:
if (record_boot)
sysfs_remove_group(fpdt_kobj, &boot_attr_group);
if (record_suspend)
sysfs_remove_group(fpdt_kobj, &suspend_attr_group);
if (record_resume)
sysfs_remove_group(fpdt_kobj, &resume_attr_group);
return result;
}
static int __init acpi_init_fpdt(void)
@ -255,6 +275,7 @@ static int __init acpi_init_fpdt(void)
struct acpi_table_header *header;
struct fpdt_subtable_entry *subtable;
u32 offset = sizeof(*header);
int result;
status = acpi_get_table(ACPI_SIG_FPDT, 0, &header);
@ -263,8 +284,8 @@ static int __init acpi_init_fpdt(void)
fpdt_kobj = kobject_create_and_add("fpdt", acpi_kobj);
if (!fpdt_kobj) {
acpi_put_table(header);
return -ENOMEM;
result = -ENOMEM;
goto err_nomem;
}
while (offset < header->length) {
@ -272,8 +293,10 @@ static int __init acpi_init_fpdt(void)
switch (subtable->type) {
case SUBTABLE_FBPT:
case SUBTABLE_S3PT:
fpdt_process_subtable(subtable->address,
result = fpdt_process_subtable(subtable->address,
subtable->type);
if (result)
goto err_subtable;
break;
default:
/* Other types are reserved in ACPI 6.4 spec. */
@ -282,6 +305,12 @@ static int __init acpi_init_fpdt(void)
offset += sizeof(*subtable);
}
return 0;
err_subtable:
kobject_put(fpdt_kobj);
err_nomem:
acpi_put_table(header);
return result;
}
fs_initcall(acpi_init_fpdt);

View File

@ -209,6 +209,20 @@ err_pool_alloc:
return -ENOMEM;
}
/**
* ghes_estatus_pool_region_free - free previously allocated memory
* from the ghes_estatus_pool.
* @addr: address of memory to free.
* @size: size of memory to free.
*
* Returns none.
*/
void ghes_estatus_pool_region_free(unsigned long addr, u32 size)
{
gen_pool_free(ghes_estatus_pool, addr, size);
}
EXPORT_SYMBOL_GPL(ghes_estatus_pool_region_free);
static int map_gen_v2(struct ghes *ghes)
{
return apei_map_generic_address(&ghes->generic_v2->read_ack_register);
@ -564,6 +578,7 @@ static void ghes_handle_aer(struct acpi_hest_generic_data *gdata)
pcie_err->validation_bits & CPER_PCIE_VALID_AER_INFO) {
unsigned int devfn;
int aer_severity;
u8 *aer_info;
devfn = PCI_DEVFN(pcie_err->device_id.device,
pcie_err->device_id.function);
@ -577,11 +592,17 @@ static void ghes_handle_aer(struct acpi_hest_generic_data *gdata)
if (gdata->flags & CPER_SEC_RESET)
aer_severity = AER_FATAL;
aer_info = (void *)gen_pool_alloc(ghes_estatus_pool,
sizeof(struct aer_capability_regs));
if (!aer_info)
return;
memcpy(aer_info, pcie_err->aer_info, sizeof(struct aer_capability_regs));
aer_recover_queue(pcie_err->device_id.segment,
pcie_err->device_id.bus,
devfn, aer_severity,
(struct aer_capability_regs *)
pcie_err->aer_info);
aer_info);
}
#endif
}

View File

@ -1924,6 +1924,16 @@ static const struct dmi_system_id ec_dmi_table[] __initconst = {
DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion Gaming Laptop 15-dk1xxx"),
},
},
{
/*
* HP 250 G7 Notebook PC
*/
.callback = ec_honor_dsdt_gpe,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "HP"),
DMI_MATCH(DMI_PRODUCT_NAME, "HP 250 G7 Notebook PC"),
},
},
{
/*
* Samsung hardware

View File

@ -495,6 +495,18 @@ static const struct dmi_system_id maingear_laptop[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "MG-VCP2-15A3070T"),
}
},
{
/* TongFang GMxXGxx/TUXEDO Polaris 15 Gen5 AMD */
.matches = {
DMI_MATCH(DMI_BOARD_NAME, "GMxXGxx"),
},
},
{
/* TongFang GM6XGxX/TUXEDO Stellaris 16 Gen5 AMD */
.matches = {
DMI_MATCH(DMI_BOARD_NAME, "GM6XGxX"),
},
},
{
.ident = "MAINGEAR Vector Pro 2 17",
.matches = {

View File

@ -2291,19 +2291,21 @@ static int get_esi(struct atm_dev *dev)
static int reset_sar(struct atm_dev *dev)
{
IADEV *iadev;
int i, error = 1;
int i, error;
unsigned int pci[64];
iadev = INPH_IA_DEV(dev);
for(i=0; i<64; i++)
if ((error = pci_read_config_dword(iadev->pci,
i*4, &pci[i])) != PCIBIOS_SUCCESSFUL)
return error;
for (i = 0; i < 64; i++) {
error = pci_read_config_dword(iadev->pci, i * 4, &pci[i]);
if (error != PCIBIOS_SUCCESSFUL)
return error;
}
writel(0, iadev->reg+IPHASE5575_EXT_RESET);
for(i=0; i<64; i++)
if ((error = pci_write_config_dword(iadev->pci,
i*4, pci[i])) != PCIBIOS_SUCCESSFUL)
return error;
for (i = 0; i < 64; i++) {
error = pci_write_config_dword(iadev->pci, i * 4, pci[i]);
if (error != PCIBIOS_SUCCESSFUL)
return error;
}
udelay(5);
return 0;
}

View File

@ -1274,8 +1274,8 @@ static void __device_release_driver(struct device *dev, struct device *parent)
if (dev->bus && dev->bus->dma_cleanup)
dev->bus->dma_cleanup(dev);
device_links_driver_cleanup(dev);
device_unbind_cleanup(dev);
device_links_driver_cleanup(dev);
klist_remove(&dev->p->knode_driver);
device_pm_check_callbacks(dev);

View File

@ -334,6 +334,11 @@ static int regcache_default_sync(struct regmap *map, unsigned int min,
return 0;
}
static int rbtree_all(const void *key, const struct rb_node *node)
{
return 0;
}
/**
* regcache_sync - Sync the register cache with the hardware.
*
@ -351,6 +356,7 @@ int regcache_sync(struct regmap *map)
unsigned int i;
const char *name;
bool bypass;
struct rb_node *node;
if (WARN_ON(map->cache_type == REGCACHE_NONE))
return -EINVAL;
@ -392,6 +398,30 @@ out:
/* Restore the bypass state */
map->cache_bypass = bypass;
map->no_sync_defaults = false;
/*
* If we did any paging with cache bypassed and a cached
* paging register then the register and cache state might
* have gone out of sync, force writes of all the paging
* registers.
*/
rb_for_each(node, 0, &map->range_tree, rbtree_all) {
struct regmap_range_node *this =
rb_entry(node, struct regmap_range_node, node);
/* If there's nothing in the cache there's nothing to sync */
ret = regcache_read(map, this->selector_reg, &i);
if (ret != 0)
continue;
ret = _regmap_write(map, this->selector_reg, i);
if (ret != 0) {
dev_err(map->dev, "Failed to write %x = %x: %d\n",
this->selector_reg, i, ret);
break;
}
}
map->unlock(map->lock_arg);
regmap_async_complete(map);

View File

@ -1313,6 +1313,7 @@ static int virtblk_probe(struct virtio_device *vdev)
u16 min_io_size;
u8 physical_block_exp, alignment_offset;
unsigned int queue_depth;
size_t max_dma_size;
if (!vdev->config->get) {
dev_err(&vdev->dev, "%s failure: config access disabled\n",
@ -1411,7 +1412,8 @@ static int virtblk_probe(struct virtio_device *vdev)
/* No real sector limit. */
blk_queue_max_hw_sectors(q, UINT_MAX);
max_size = virtio_max_dma_size(vdev);
max_dma_size = virtio_max_dma_size(vdev);
max_size = max_dma_size > U32_MAX ? U32_MAX : max_dma_size;
/* Host can optionally specify maximum segment size and number of
* segments. */

View File

@ -543,6 +543,10 @@ static const struct usb_device_id quirks_table[] = {
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x0bda, 0x887b), .driver_info = BTUSB_REALTEK |
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x0bda, 0xb85b), .driver_info = BTUSB_REALTEK |
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x13d3, 0x3570), .driver_info = BTUSB_REALTEK |
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x13d3, 0x3571), .driver_info = BTUSB_REALTEK |
BTUSB_WIDEBAND_SPEECH },
@ -2818,6 +2822,9 @@ static int btusb_mtk_hci_wmt_sync(struct hci_dev *hdev,
goto err_free_wc;
}
if (data->evt_skb == NULL)
goto err_free_wc;
/* Parse and handle the return WMT event */
wmt_evt = (struct btmtk_hci_wmt_evt *)data->evt_skb->data;
if (wmt_evt->whdr.op != hdr->op) {

View File

@ -38,7 +38,7 @@ static struct _parisc_agp_info {
int lba_cap_offset;
u64 *gatt;
__le64 *gatt;
u64 gatt_entries;
u64 gart_base;
@ -104,7 +104,7 @@ parisc_agp_create_gatt_table(struct agp_bridge_data *bridge)
int i;
for (i = 0; i < info->gatt_entries; i++) {
info->gatt[i] = (unsigned long)agp_bridge->scratch_page;
info->gatt[i] = cpu_to_le64(agp_bridge->scratch_page);
}
return 0;
@ -158,9 +158,9 @@ parisc_agp_insert_memory(struct agp_memory *mem, off_t pg_start, int type)
for (k = 0;
k < info->io_pages_per_kpage;
k++, j++, paddr += info->io_page_size) {
info->gatt[j] =
info->gatt[j] = cpu_to_le64(
parisc_agp_mask_memory(agp_bridge,
paddr, type);
paddr, type));
asm_io_fdc(&info->gatt[j]);
}
}
@ -184,7 +184,7 @@ parisc_agp_remove_memory(struct agp_memory *mem, off_t pg_start, int type)
io_pg_start = info->io_pages_per_kpage * pg_start;
io_pg_count = info->io_pages_per_kpage * mem->page_count;
for (i = io_pg_start; i < io_pg_count + io_pg_start; i++) {
info->gatt[i] = agp_bridge->scratch_page;
info->gatt[i] = cpu_to_le64(agp_bridge->scratch_page);
}
agp_bridge->driver->tlb_flush(mem);
@ -204,7 +204,8 @@ parisc_agp_mask_memory(struct agp_bridge_data *bridge, dma_addr_t addr,
pa |= (ci >> PAGE_SHIFT) & 0xff;/* move CI (8 bits) into lowest byte */
pa |= SBA_PDIR_VALID_BIT; /* set "valid" bit */
return cpu_to_le64(pa);
/* return native (big-endian) PDIR entry */
return pa;
}
static void
@ -251,7 +252,8 @@ static int __init
agp_ioc_init(void __iomem *ioc_regs)
{
struct _parisc_agp_info *info = &parisc_agp_info;
u64 iova_base, *io_pdir, io_tlb_ps;
u64 iova_base, io_tlb_ps;
__le64 *io_pdir;
int io_tlb_shift;
printk(KERN_INFO DRVPFX "IO PDIR shared with sba_iommu\n");
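Illustrative sketch (not part of the patch): the GATT is now typed __le64 and every store goes through cpu_to_le64(), so entries keep a fixed little-endian layout on this big-endian machine and sparse can check the conversions. A rough userspace analogue using glibc's htole64()/le64toh() from <endian.h>:

#include <endian.h>
#include <inttypes.h>
#include <stdio.h>

/* Store table entries in a fixed little-endian layout regardless of
 * host byte order, the way the GATT entries are now written. */
static void store_entry(uint64_t *table, int idx, uint64_t native_val)
{
	table[idx] = htole64(native_val);
}

int main(void)
{
	uint64_t gatt[4] = { 0 };

	store_entry(gatt, 0, 0x123456789abcdef0ULL);
	printf("stored (little-endian form): 0x%016" PRIx64 "\n", gatt[0]);
	printf("read back natively:          0x%016" PRIx64 "\n", le64toh(gatt[0]));
	return 0;
}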

View File

@ -72,7 +72,6 @@ static struct clk_fixed_factor gpll0_out_main_div2 = {
&gpll0_main.clkr.hw },
.num_parents = 1,
.ops = &clk_fixed_factor_ops,
.flags = CLK_SET_RATE_PARENT,
},
};
@ -86,7 +85,6 @@ static struct clk_alpha_pll_postdiv gpll0 = {
&gpll0_main.clkr.hw },
.num_parents = 1,
.ops = &clk_alpha_pll_postdiv_ro_ops,
.flags = CLK_SET_RATE_PARENT,
},
};
@ -161,7 +159,6 @@ static struct clk_alpha_pll_postdiv gpll6 = {
&gpll6_main.clkr.hw },
.num_parents = 1,
.ops = &clk_alpha_pll_postdiv_ro_ops,
.flags = CLK_SET_RATE_PARENT,
},
};
@ -192,7 +189,6 @@ static struct clk_alpha_pll_postdiv gpll4 = {
&gpll4_main.clkr.hw },
.num_parents = 1,
.ops = &clk_alpha_pll_postdiv_ro_ops,
.flags = CLK_SET_RATE_PARENT,
},
};
@ -243,7 +239,6 @@ static struct clk_alpha_pll_postdiv gpll2 = {
&gpll2_main.clkr.hw },
.num_parents = 1,
.ops = &clk_alpha_pll_postdiv_ro_ops,
.flags = CLK_SET_RATE_PARENT,
},
};
@ -274,7 +269,6 @@ static struct clk_alpha_pll_postdiv nss_crypto_pll = {
&nss_crypto_pll_main.clkr.hw },
.num_parents = 1,
.ops = &clk_alpha_pll_postdiv_ro_ops,
.flags = CLK_SET_RATE_PARENT,
},
};

View File

@ -75,7 +75,6 @@ static struct clk_fixed_factor gpll0_out_main_div2 = {
&gpll0_main.clkr.hw },
.num_parents = 1,
.ops = &clk_fixed_factor_ops,
.flags = CLK_SET_RATE_PARENT,
},
};
@ -121,7 +120,6 @@ static struct clk_alpha_pll_postdiv gpll2 = {
&gpll2_main.clkr.hw },
.num_parents = 1,
.ops = &clk_alpha_pll_postdiv_ro_ops,
.flags = CLK_SET_RATE_PARENT,
},
};
@ -154,7 +152,6 @@ static struct clk_alpha_pll_postdiv gpll4 = {
&gpll4_main.clkr.hw },
.num_parents = 1,
.ops = &clk_alpha_pll_postdiv_ro_ops,
.flags = CLK_SET_RATE_PARENT,
},
};
@ -188,7 +185,6 @@ static struct clk_alpha_pll_postdiv gpll6 = {
&gpll6_main.clkr.hw },
.num_parents = 1,
.ops = &clk_alpha_pll_postdiv_ro_ops,
.flags = CLK_SET_RATE_PARENT,
},
};
@ -201,7 +197,6 @@ static struct clk_fixed_factor gpll6_out_main_div2 = {
&gpll6_main.clkr.hw },
.num_parents = 1,
.ops = &clk_fixed_factor_ops,
.flags = CLK_SET_RATE_PARENT,
},
};
@ -266,7 +261,6 @@ static struct clk_alpha_pll_postdiv nss_crypto_pll = {
&nss_crypto_pll_main.clkr.hw },
.num_parents = 1,
.ops = &clk_alpha_pll_postdiv_ro_ops,
.flags = CLK_SET_RATE_PARENT,
},
};

View File

@ -7,8 +7,10 @@
#define __STRATIX10_CLK_H
struct stratix10_clock_data {
struct clk_hw_onecell_data clk_data;
void __iomem *base;
/* Must be last */
struct clk_hw_onecell_data clk_data;
};
struct stratix10_pll_clock {

View File

@ -15,8 +15,10 @@
struct visconti_pll_provider {
void __iomem *reg_base;
struct clk_hw_onecell_data clk_data;
struct device_node *node;
/* Must be last */
struct clk_hw_onecell_data clk_data;
};
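Illustrative sketch (not part of the patch): clk_hw_onecell_data ends in a flexible array of clk_hw pointers, so both hunks move it to the end of the wrapping struct and mark it "Must be last". A standalone miniature of the pattern (made-up names; embedding a flexible-array struct as the final member is the GNU C extension the kernel relies on):

#include <stdio.h>
#include <stdlib.h>

struct onecell_data {
	unsigned int num;
	void *hws[];			/* flexible array member */
};

struct provider {
	void *reg_base;			/* fixed-size fields first */
	struct onecell_data clk_data;	/* must be last */
};

int main(void)
{
	unsigned int nclks = 8;
	struct provider *p;

	/* allocate the fixed part plus room for the trailing entries */
	p = calloc(1, sizeof(*p) + nclks * sizeof(p->clk_data.hws[0]));
	if (!p)
		return 1;
	p->clk_data.num = nclks;
	printf("provider with %u clock slots\n", p->clk_data.num);
	free(p);
	return 0;
}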
#define VISCONTI_PLL_RATE(_rate, _dacen, _dsmen, \

View File

@ -315,6 +315,7 @@ static void __init tcb_setup_dual_chan(struct atmel_tc *tc, int mck_divisor_idx)
writel(mck_divisor_idx /* likely divide-by-8 */
| ATMEL_TC_WAVE
| ATMEL_TC_WAVESEL_UP /* free-run */
| ATMEL_TC_ASWTRG_SET /* TIOA0 rises at software trigger */
| ATMEL_TC_ACPA_SET /* TIOA0 rises at 0 */
| ATMEL_TC_ACPC_CLEAR, /* (duty cycle 50%) */
tcaddr + ATMEL_TC_REG(0, CMR));

View File

@ -131,23 +131,23 @@ static ssize_t show_trans_table(struct cpufreq_policy *policy, char *buf)
len += sysfs_emit_at(buf, len, " From : To\n");
len += sysfs_emit_at(buf, len, " : ");
for (i = 0; i < stats->state_num; i++) {
if (len >= PAGE_SIZE)
if (len >= PAGE_SIZE - 1)
break;
len += sysfs_emit_at(buf, len, "%9u ", stats->freq_table[i]);
}
if (len >= PAGE_SIZE)
return PAGE_SIZE;
if (len >= PAGE_SIZE - 1)
return PAGE_SIZE - 1;
len += sysfs_emit_at(buf, len, "\n");
for (i = 0; i < stats->state_num; i++) {
if (len >= PAGE_SIZE)
if (len >= PAGE_SIZE - 1)
break;
len += sysfs_emit_at(buf, len, "%9u: ", stats->freq_table[i]);
for (j = 0; j < stats->state_num; j++) {
if (len >= PAGE_SIZE)
if (len >= PAGE_SIZE - 1)
break;
if (pending)
@ -157,12 +157,12 @@ static ssize_t show_trans_table(struct cpufreq_policy *policy, char *buf)
len += sysfs_emit_at(buf, len, "%9u ", count);
}
if (len >= PAGE_SIZE)
if (len >= PAGE_SIZE - 1)
break;
len += sysfs_emit_at(buf, len, "\n");
}
if (len >= PAGE_SIZE) {
if (len >= PAGE_SIZE - 1) {
pr_warn_once("cpufreq transition table exceeds PAGE_SIZE. Disabling\n");
return -EFBIG;
}
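Illustrative sketch (not part of the patch): the transition table is emitted into a single PAGE_SIZE sysfs buffer, and the checks now trip one byte earlier so an append is never attempted at the very end of the page. The same bounded-append shape as a standalone C program, with a made-up 32-byte "page":

#include <stdio.h>

#define FAKE_PAGE_SIZE 32	/* stand-in for PAGE_SIZE, illustration only */

/* Append at offset len, refusing once only the last byte is left. */
static int emit_at(char *buf, int len, const char *s)
{
	if (len >= FAKE_PAGE_SIZE - 1)
		return len;
	return len + snprintf(buf + len, FAKE_PAGE_SIZE - len, "%s", s);
}

int main(void)
{
	char buf[FAKE_PAGE_SIZE];
	int len = 0;

	for (int i = 0; i < 10; i++)
		len = emit_at(buf, len, "12345 ");
	if (len >= FAKE_PAGE_SIZE - 1)
		len = FAKE_PAGE_SIZE - 1;
	printf("len=%d buf=\"%s\"\n", len, buf);
	return 0;
}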

View File

@ -847,6 +847,8 @@ static void qm_poll_req_cb(struct hisi_qp *qp)
qm_db(qm, qp->qp_id, QM_DOORBELL_CMD_CQ,
qp->qp_status.cq_head, 0);
atomic_dec(&qp->qp_status.used);
cond_resched();
}
/* set c_flag */

View File

@ -1242,35 +1242,39 @@ static struct device *grandparent(struct device *dev)
return NULL;
}
static struct device *endpoint_host(struct cxl_port *endpoint)
{
struct cxl_port *port = to_cxl_port(endpoint->dev.parent);
if (is_cxl_root(port))
return port->uport_dev;
return &port->dev;
}
static void delete_endpoint(void *data)
{
struct cxl_memdev *cxlmd = data;
struct cxl_port *endpoint = cxlmd->endpoint;
struct cxl_port *parent_port;
struct device *parent;
struct device *host = endpoint_host(endpoint);
parent_port = cxl_mem_find_port(cxlmd, NULL);
if (!parent_port)
goto out;
parent = &parent_port->dev;
device_lock(parent);
if (parent->driver && !endpoint->dead) {
devm_release_action(parent, cxl_unlink_parent_dport, endpoint);
devm_release_action(parent, cxl_unlink_uport, endpoint);
devm_release_action(parent, unregister_port, endpoint);
device_lock(host);
if (host->driver && !endpoint->dead) {
devm_release_action(host, cxl_unlink_parent_dport, endpoint);
devm_release_action(host, cxl_unlink_uport, endpoint);
devm_release_action(host, unregister_port, endpoint);
}
cxlmd->endpoint = NULL;
device_unlock(parent);
put_device(parent);
out:
device_unlock(host);
put_device(&endpoint->dev);
put_device(host);
}
int cxl_endpoint_autoremove(struct cxl_memdev *cxlmd, struct cxl_port *endpoint)
{
struct device *host = endpoint_host(endpoint);
struct device *dev = &cxlmd->dev;
get_device(host);
get_device(&endpoint->dev);
cxlmd->endpoint = endpoint;
cxlmd->depth = endpoint->depth;

View File

@ -1127,7 +1127,14 @@ static int cxl_port_setup_targets(struct cxl_port *port,
}
if (is_cxl_root(parent_port)) {
parent_ig = cxlrd->cxlsd.cxld.interleave_granularity;
/*
* Root decoder IG is always set to the value in the CFMWS, which
* may be different from this region's IG. We can use the
* region's IG here since interleave_granularity_store()
* does not allow interleaved host-bridges with
* root IG != region IG.
*/
parent_ig = p->interleave_granularity;
parent_iw = cxlrd->cxlsd.cxld.interleave_ways;
/*
* For purposes of address bit routing, use power-of-2 math for
@ -1675,6 +1682,12 @@ static int cxl_region_attach(struct cxl_region *cxlr,
return -ENXIO;
}
if (p->nr_targets >= p->interleave_ways) {
dev_dbg(&cxlr->dev, "region already has %d endpoints\n",
p->nr_targets);
return -EINVAL;
}
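Illustrative sketch (not part of the patch, and not the exact CXL HDM decoder bit selection): the interleave-granularity comment above relies on the region's IG, and the position of traffic within a power-of-2 interleave set is plain modular arithmetic over granularity-sized chunks:

#include <stdint.h>
#include <stdio.h>

/* Which of `ways` targets services a given address when traffic is
 * striped in `granularity`-byte chunks (both powers of two). */
static unsigned int interleave_target(uint64_t addr, unsigned int ways,
				      unsigned int granularity)
{
	return (addr / granularity) % ways;
}

int main(void)
{
	unsigned int ways = 4, gran = 256;

	for (uint64_t addr = 0; addr < 8ULL * gran; addr += gran)
		printf("addr 0x%04llx -> target %u\n",
		       (unsigned long long)addr,
		       interleave_target(addr, ways, gran));
	return 0;
}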
ep_port = cxled_to_port(cxled);
root_port = cxlrd_to_port(cxlrd);
dport = cxl_find_dport_by_dev(root_port, ep_port->host_bridge);
@ -1767,7 +1780,7 @@ static int cxl_region_attach(struct cxl_region *cxlr,
if (p->nr_targets == p->interleave_ways) {
rc = cxl_region_setup_targets(cxlr);
if (rc)
goto err_decrement;
return rc;
p->state = CXL_CONFIG_ACTIVE;
}
@ -1799,12 +1812,6 @@ static int cxl_region_attach(struct cxl_region *cxlr,
}
return 0;
err_decrement:
p->nr_targets--;
cxled->pos = -1;
p->targets[pos] = NULL;
return rc;
}
static int cxl_region_detach(struct cxl_endpoint_decoder *cxled)

View File

@ -489,7 +489,7 @@ static int stm32_mdma_set_xfer_param(struct stm32_mdma_chan *chan,
src_maxburst = chan->dma_config.src_maxburst;
dst_maxburst = chan->dma_config.dst_maxburst;
ccr = stm32_mdma_read(dmadev, STM32_MDMA_CCR(chan->id));
ccr = stm32_mdma_read(dmadev, STM32_MDMA_CCR(chan->id)) & ~STM32_MDMA_CCR_EN;
ctcr = stm32_mdma_read(dmadev, STM32_MDMA_CTCR(chan->id));
ctbr = stm32_mdma_read(dmadev, STM32_MDMA_CTBR(chan->id));
@ -965,7 +965,7 @@ stm32_mdma_prep_dma_memcpy(struct dma_chan *c, dma_addr_t dest, dma_addr_t src,
if (!desc)
return NULL;
ccr = stm32_mdma_read(dmadev, STM32_MDMA_CCR(chan->id));
ccr = stm32_mdma_read(dmadev, STM32_MDMA_CCR(chan->id)) & ~STM32_MDMA_CCR_EN;
ctcr = stm32_mdma_read(dmadev, STM32_MDMA_CTCR(chan->id));
ctbr = stm32_mdma_read(dmadev, STM32_MDMA_CTBR(chan->id));
cbndtr = stm32_mdma_read(dmadev, STM32_MDMA_CBNDTR(chan->id));
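Illustrative sketch (not part of the patch): both hunks mask the enable bit out of the CCR value read back from the channel, so a stale EN from a previous transfer is never written back while the channel is being reconfigured. The read-modify-write in isolation, with placeholder names:

#include <stdint.h>
#include <stdio.h>

#define FAKE_CCR_EN	(1u << 0)	/* placeholder enable bit */

/* Drop the enable bit from a read-back control value so the caller
 * can rebuild the configuration before re-enabling the channel. */
static uint32_t read_ccr_disabled(uint32_t hw_ccr)
{
	return hw_ccr & ~FAKE_CCR_EN;
}

int main(void)
{
	uint32_t hw_ccr = 0x41;		/* hypothetical: EN plus config bits */

	printf("hw=0x%08x reused=0x%08x\n", hw_ccr, read_ccr_disabled(hw_ccr));
	return 0;
}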

View File

@ -167,6 +167,12 @@ static enum qcom_scm_convention __get_convention(void)
if (likely(qcom_scm_convention != SMC_CONVENTION_UNKNOWN))
return qcom_scm_convention;
/*
* Per the "SMC calling convention specification", the 64-bit calling
* convention can only be used when the client is 64-bit; otherwise
* the system will encounter undefined behaviour.
*/
#if IS_ENABLED(CONFIG_ARM64)
/*
* Device isn't required as there is only one argument - no device
* needed to dma_map_single to secure world
@ -187,6 +193,7 @@ static enum qcom_scm_convention __get_convention(void)
forced = true;
goto found;
}
#endif
probed_convention = SMC_CONVENTION_ARM_32;
ret = __scm_smc_call(NULL, &desc, probed_convention, &res, true);
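Illustrative sketch (not part of the patch): the 64-bit convention probe is now compiled only into arm64 kernels via IS_ENABLED(CONFIG_ARM64). A minimal standalone analogue of that compile-time gating (FAKE_CONFIG_ARM64 stands in for the Kconfig symbol):

#include <stdio.h>

#if defined(FAKE_CONFIG_ARM64)
#define USE_SMC64 1
#else
#define USE_SMC64 0
#endif

int main(void)
{
	if (USE_SMC64)
		printf("probing the 64-bit SMC calling convention\n");
	else
		printf("32-bit client: skipping the 64-bit probe\n");
	return 0;
}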

View File

@ -1655,6 +1655,26 @@ static const struct dmi_system_id gpiolib_acpi_quirks[] __initconst = {
.ignore_wake = "SYNA1202:00@16",
},
},
{
/*
* On the Peaq C1010 2-in-1 INT33FC:00 pin 3 is connected to
* a "dolby" button. At the ACPI level an _AEI event-handler
* is connected which sets an ACPI variable to 1 on both
* edges. This variable can be polled + cleared to 0 using
* WMI. But since the variable is set on both edges the WMI
* interface is pretty useless even when polling.
* So instead the x86-android-tablets code instantiates
* a gpio-keys platform device for it.
* Ignore the _AEI handler for the pin, so that it is not busy.
*/
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "PEAQ"),
DMI_MATCH(DMI_PRODUCT_NAME, "PEAQ PMM C1010 MD99187"),
},
.driver_data = &(struct acpi_gpiolib_dmi_quirk) {
.ignore_interrupt = "INT33FC:00@3",
},
},
{} /* Terminating entry */
};
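Illustrative sketch (not part of the patch): the quirk is keyed on DMI vendor/product strings and its driver_data names the pin whose _AEI handler should be ignored. A made-up miniature of the same match-table idea (the real code uses dmi_system_id and acpi_gpiolib_dmi_quirk):

#include <stdio.h>
#include <string.h>

struct quirk {
	const char *vendor;
	const char *product;
	const char *ignore_interrupt;	/* e.g. "INT33FC:00@3" */
};

static const struct quirk quirks[] = {
	{ "PEAQ", "PEAQ PMM C1010 MD99187", "INT33FC:00@3" },
	{ NULL, NULL, NULL },		/* terminating entry */
};

static const struct quirk *match_quirk(const char *vendor, const char *product)
{
	for (const struct quirk *q = quirks; q->vendor; q++)
		if (!strcmp(q->vendor, vendor) && !strcmp(q->product, product))
			return q;
	return NULL;
}

int main(void)
{
	const struct quirk *q = match_quirk("PEAQ", "PEAQ PMM C1010 MD99187");

	if (q)
		printf("ignoring _AEI on pin %s\n", q->ignore_interrupt);
	return 0;
}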

View File

@ -512,6 +512,10 @@ static struct gpio_desc *of_find_gpio_rename(struct device_node *np,
#if IS_ENABLED(CONFIG_SND_SOC_CS42L56)
{ "reset", "cirrus,gpio-nreset", "cirrus,cs42l56" },
#endif
#if IS_ENABLED(CONFIG_SND_SOC_MT2701_CS42448)
{ "i2s1-in-sel-gpio1", NULL, "mediatek,mt2701-cs42448-machine" },
{ "i2s1-in-sel-gpio2", NULL, "mediatek,mt2701-cs42448-machine" },
#endif
#if IS_ENABLED(CONFIG_SND_SOC_TLV320AIC3X)
{ "reset", "gpio-reset", "ti,tlv320aic3x" },
{ "reset", "gpio-reset", "ti,tlv320aic33" },

View File

@ -29,6 +29,7 @@
#include "amdgpu.h"
#include "atom.h"
#include <linux/device.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/acpi.h>
@ -287,6 +288,10 @@ static bool amdgpu_atrm_get_bios(struct amdgpu_device *adev)
if (adev->flags & AMD_IS_APU)
return false;
/* ATRM is for on-platform devices only */
if (dev_is_removable(&adev->pdev->dev))
return false;
while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) {
dhandle = ACPI_HANDLE(&pdev->dev);
if (!dhandle)

View File

@ -183,6 +183,7 @@ int amdgpu_bo_list_get(struct amdgpu_fpriv *fpriv, int id,
}
rcu_read_unlock();
*result = NULL;
return -ENOENT;
}

View File

@ -1411,7 +1411,7 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
if (r == -ENOMEM)
DRM_ERROR("Not enough memory for command submission!\n");
else if (r != -ERESTARTSYS && r != -EAGAIN)
DRM_ERROR("Failed to process the buffer list %d!\n", r);
DRM_DEBUG("Failed to process the buffer list %d!\n", r);
goto error_fini;
}

View File

@ -748,6 +748,9 @@ static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf,
ssize_t result = 0;
int r;
if (!adev->smc_rreg)
return -EPERM;
if (size & 0x3 || *pos & 0x3)
return -EINVAL;
@ -804,6 +807,9 @@ static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *
ssize_t result = 0;
int r;
if (!adev->smc_wreg)
return -EPERM;
if (size & 0x3 || *pos & 0x3)
return -EINVAL;

View File

@ -43,6 +43,7 @@
#include <drm/drm_fb_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/amdgpu_drm.h>
#include <linux/device.h>
#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>
#include <linux/efi.h>
@ -2018,7 +2019,6 @@ out:
*/
static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
{
struct drm_device *dev = adev_to_drm(adev);
struct pci_dev *parent;
int i, r;
bool total;
@ -2089,7 +2089,7 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
(amdgpu_is_atpx_hybrid() ||
amdgpu_has_atpx_dgpu_power_cntl()) &&
((adev->flags & AMD_IS_APU) == 0) &&
!pci_is_thunderbolt_attached(to_pci_dev(dev->dev)))
!dev_is_removable(&adev->pdev->dev))
adev->flags |= AMD_IS_PX;
if (!(adev->flags & AMD_IS_APU)) {
@ -2103,6 +2103,8 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
if (amdgpu_sriov_vf(adev) && adev->asic_type == CHIP_SIENNA_CICHLID)
adev->pm.pp_feature &= ~PP_OVERDRIVE_MASK;
if (!amdgpu_device_pcie_dynamic_switching_supported())
adev->pm.pp_feature &= ~PP_PCIE_DPM_MASK;
total = true;
for (i = 0; i < adev->num_ip_blocks; i++) {
@ -3901,7 +3903,7 @@ fence_driver_init:
px = amdgpu_device_supports_px(ddev);
if (px || (!pci_is_thunderbolt_attached(adev->pdev) &&
if (px || (!dev_is_removable(&adev->pdev->dev) &&
apple_gmux_detect(NULL, NULL)))
vga_switcheroo_register_client(adev->pdev,
&amdgpu_switcheroo_ops, px);
@ -4046,7 +4048,7 @@ void amdgpu_device_fini_sw(struct amdgpu_device *adev)
px = amdgpu_device_supports_px(adev_to_drm(adev));
if (px || (!pci_is_thunderbolt_attached(adev->pdev) &&
if (px || (!dev_is_removable(&adev->pdev->dev) &&
apple_gmux_detect(NULL, NULL)))
vga_switcheroo_unregister_client(adev->pdev);
@ -5183,7 +5185,8 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
* Flush RAM to disk so that after reboot
* the user can read the log and see why the system rebooted.
*/
if (need_emergency_restart && amdgpu_ras_get_context(adev)->reboot) {
if (need_emergency_restart && amdgpu_ras_get_context(adev) &&
amdgpu_ras_get_context(adev)->reboot) {
DRM_WARN("Emergency reboot.");
ksys_sync_helper();

View File

@ -93,6 +93,7 @@
MODULE_FIRMWARE(FIRMWARE_IP_DISCOVERY);
#define mmRCC_CONFIG_MEMSIZE 0xde3
#define mmMP0_SMN_C2PMSG_33 0x16061
#define mmMM_INDEX 0x0
#define mmMM_INDEX_HI 0x6
#define mmMM_DATA 0x1
@ -231,8 +232,26 @@ static int amdgpu_discovery_read_binary_from_sysmem(struct amdgpu_device *adev,
static int amdgpu_discovery_read_binary_from_mem(struct amdgpu_device *adev,
uint8_t *binary)
{
uint64_t vram_size = (uint64_t)RREG32(mmRCC_CONFIG_MEMSIZE) << 20;
int ret = 0;
uint64_t vram_size;
u32 msg;
int i, ret = 0;
/* It can take up to a second for IFWI init to complete on some dGPUs,
* but generally it should be in the 60-100ms range. Normally this starts
* as soon as the device gets power so by the time the OS loads this has long
* completed. However, when a card is hotplugged (e.g. via USB4), we need to
* wait for this to complete. Once the C2PMSG is updated, we can
* continue.
*/
if (dev_is_removable(&adev->pdev->dev)) {
for (i = 0; i < 1000; i++) {
msg = RREG32(mmMP0_SMN_C2PMSG_33);
if (msg & 0x80000000)
break;
msleep(1);
}
}
vram_size = (uint64_t)RREG32(mmRCC_CONFIG_MEMSIZE) << 20;
if (vram_size) {
uint64_t pos = vram_size - DISCOVERY_TMR_OFFSET;
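Illustrative sketch (not part of the patch): the hunk above bounds the IFWI wait by polling the message register up to 1000 times with a 1 ms sleep, checking the ready bit each pass. The same bounded-poll shape as a standalone program, with the register read simulated:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define READY_BIT	0x80000000u
#define MAX_TRIES	1000

/* Pretend the hardware flips the ready bit after a few reads, standing
 * in for RREG32(mmMP0_SMN_C2PMSG_33). */
static uint32_t fake_read_c2pmsg(void)
{
	static int reads;

	return (++reads >= 5) ? READY_BIT : 0;
}

static bool wait_for_ready(void)
{
	for (int i = 0; i < MAX_TRIES; i++) {
		if (fake_read_c2pmsg() & READY_BIT)
			return true;
		/* the real loop sleeps 1 ms here */
	}
	return false;
}

int main(void)
{
	printf("IFWI %s\n", wait_for_ready() ? "ready" : "timed out");
	return 0;
}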

View File

@ -556,8 +556,20 @@ static void amdgpu_mes_queue_init_mqd(struct amdgpu_device *adev,
mqd_prop.hqd_queue_priority = p->hqd_queue_priority;
mqd_prop.hqd_active = false;
if (p->queue_type == AMDGPU_RING_TYPE_GFX ||
p->queue_type == AMDGPU_RING_TYPE_COMPUTE) {
mutex_lock(&adev->srbm_mutex);
amdgpu_gfx_select_me_pipe_q(adev, p->ring->me, p->ring->pipe, 0, 0, 0);
}
mqd_mgr->init_mqd(adev, q->mqd_cpu_ptr, &mqd_prop);
if (p->queue_type == AMDGPU_RING_TYPE_GFX ||
p->queue_type == AMDGPU_RING_TYPE_COMPUTE) {
amdgpu_gfx_select_me_pipe_q(adev, 0, 0, 0, 0, 0);
mutex_unlock(&adev->srbm_mutex);
}
amdgpu_bo_unreserve(q->mqd_obj);
}
@ -993,9 +1005,13 @@ int amdgpu_mes_add_ring(struct amdgpu_device *adev, int gang_id,
switch (queue_type) {
case AMDGPU_RING_TYPE_GFX:
ring->funcs = adev->gfx.gfx_ring[0].funcs;
ring->me = adev->gfx.gfx_ring[0].me;
ring->pipe = adev->gfx.gfx_ring[0].pipe;
break;
case AMDGPU_RING_TYPE_COMPUTE:
ring->funcs = adev->gfx.compute_ring[0].funcs;
ring->me = adev->gfx.compute_ring[0].me;
ring->pipe = adev->gfx.compute_ring[0].pipe;
break;
case AMDGPU_RING_TYPE_SDMA:
ring->funcs = adev->sdma.instance[0].ring.funcs;

View File

@ -1373,7 +1373,8 @@ static void amdgpu_ras_sysfs_remove_bad_page_node(struct amdgpu_device *adev)
{
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
sysfs_remove_file_from_group(&adev->dev->kobj,
if (adev->dev->kobj.sd)
sysfs_remove_file_from_group(&adev->dev->kobj,
&con->badpages_attr.attr,
RAS_FS_NAME);
}
@ -1390,7 +1391,8 @@ static int amdgpu_ras_sysfs_remove_feature_node(struct amdgpu_device *adev)
.attrs = attrs,
};
sysfs_remove_group(&adev->dev->kobj, &group);
if (adev->dev->kobj.sd)
sysfs_remove_group(&adev->dev->kobj, &group);
return 0;
}
@ -1437,7 +1439,8 @@ int amdgpu_ras_sysfs_remove(struct amdgpu_device *adev,
if (!obj || !obj->attr_inuse)
return -EINVAL;
sysfs_remove_file_from_group(&adev->dev->kobj,
if (adev->dev->kobj.sd)
sysfs_remove_file_from_group(&adev->dev->kobj,
&obj->sysfs_attr.attr,
RAS_FS_NAME);
obj->attr_inuse = 0;

View File

@ -292,8 +292,15 @@ int amdgpu_vcn_suspend(struct amdgpu_device *adev)
void *ptr;
int i, idx;
bool in_ras_intr = amdgpu_ras_intr_triggered();
cancel_delayed_work_sync(&adev->vcn.idle_work);
/* err_event_athub will corrupt the VCPU buffer, so we need to
* restore fw data and clear the buffer in amdgpu_vcn_resume() */
if (in_ras_intr)
return 0;
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
if (adev->vcn.harvest_config & (1 << i))
continue;

View File

@ -239,6 +239,8 @@ static int amdgpu_vkms_conn_get_modes(struct drm_connector *connector)
for (i = 0; i < ARRAY_SIZE(common_modes); i++) {
mode = drm_cvt_mode(dev, common_modes[i].w, common_modes[i].h, 60, false, false, false);
if (!mode)
continue;
drm_mode_probed_add(connector, mode);
}

View File

@ -1095,8 +1095,8 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
bo = gem_to_amdgpu_bo(gobj);
}
mem = bo->tbo.resource;
if (mem->mem_type == TTM_PL_TT ||
mem->mem_type == AMDGPU_PL_PREEMPT)
if (mem && (mem->mem_type == TTM_PL_TT ||
mem->mem_type == AMDGPU_PL_PREEMPT))
pages_addr = bo->tbo.ttm->dma_address;
}
@ -2125,7 +2125,8 @@ long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout)
* Returns:
* 0 for success, error for failure.
*/
int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, int32_t xcp_id)
int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
int32_t xcp_id)
{
struct amdgpu_bo *root_bo;
struct amdgpu_bo_vm *root;
@ -2144,6 +2145,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, int32_t xcp
INIT_LIST_HEAD(&vm->done);
INIT_LIST_HEAD(&vm->pt_freed);
INIT_WORK(&vm->pt_free_work, amdgpu_vm_pt_free_work);
INIT_KFIFO(vm->faults);
r = amdgpu_vm_init_entities(adev, vm);
if (r)
@ -2178,34 +2180,33 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, int32_t xcp
false, &root, xcp_id);
if (r)
goto error_free_delayed;
root_bo = &root->bo;
root_bo = amdgpu_bo_ref(&root->bo);
r = amdgpu_bo_reserve(root_bo, true);
if (r) {
amdgpu_bo_unref(&root->shadow);
amdgpu_bo_unref(&root_bo);
goto error_free_delayed;
}
amdgpu_vm_bo_base_init(&vm->root, vm, root_bo);
r = dma_resv_reserve_fences(root_bo->tbo.base.resv, 1);
if (r)
goto error_free_root;
r = dma_resv_reserve_fences(root_bo->tbo.base.resv, 1);
if (r)
goto error_unreserve;
amdgpu_vm_bo_base_init(&vm->root, vm, root_bo);
r = amdgpu_vm_pt_clear(adev, vm, root, false);
if (r)
goto error_unreserve;
goto error_free_root;
amdgpu_bo_unreserve(vm->root.bo);
INIT_KFIFO(vm->faults);
amdgpu_bo_unref(&root_bo);
return 0;
error_unreserve:
amdgpu_bo_unreserve(vm->root.bo);
error_free_root:
amdgpu_bo_unref(&root->shadow);
amdgpu_vm_pt_free_root(adev, vm);
amdgpu_bo_unreserve(vm->root.bo);
amdgpu_bo_unref(&root_bo);
vm->root.bo = NULL;
error_free_delayed:
dma_fence_put(vm->last_tlb_flush);

View File

@ -28,6 +28,7 @@
#include "nbio/nbio_2_3_offset.h"
#include "nbio/nbio_2_3_sh_mask.h"
#include <uapi/linux/kfd_ioctl.h>
#include <linux/device.h>
#include <linux/pci.h>
#define smnPCIE_CONFIG_CNTL 0x11180044
@ -361,7 +362,7 @@ static void nbio_v2_3_enable_aspm(struct amdgpu_device *adev,
data |= NAVI10_PCIE__LC_L0S_INACTIVITY_DEFAULT << PCIE_LC_CNTL__LC_L0S_INACTIVITY__SHIFT;
if (pci_is_thunderbolt_attached(adev->pdev))
if (dev_is_removable(&adev->pdev->dev))
data |= NAVI10_PCIE__LC_L1_INACTIVITY_TBT_DEFAULT << PCIE_LC_CNTL__LC_L1_INACTIVITY__SHIFT;
else
data |= NAVI10_PCIE__LC_L1_INACTIVITY_DEFAULT << PCIE_LC_CNTL__LC_L1_INACTIVITY__SHIFT;
@ -480,7 +481,7 @@ static void nbio_v2_3_program_aspm(struct amdgpu_device *adev)
def = data = RREG32_PCIE(smnPCIE_LC_CNTL);
data |= NAVI10_PCIE__LC_L0S_INACTIVITY_DEFAULT << PCIE_LC_CNTL__LC_L0S_INACTIVITY__SHIFT;
if (pci_is_thunderbolt_attached(adev->pdev))
if (dev_is_removable(&adev->pdev->dev))
data |= NAVI10_PCIE__LC_L1_INACTIVITY_TBT_DEFAULT << PCIE_LC_CNTL__LC_L1_INACTIVITY__SHIFT;
else
data |= NAVI10_PCIE__LC_L1_INACTIVITY_DEFAULT << PCIE_LC_CNTL__LC_L1_INACTIVITY__SHIFT;

View File

@ -59,6 +59,9 @@ MODULE_FIRMWARE("amdgpu/psp_14_0_0_ta.bin");
/* Read USB-PD from LFB */
#define GFX_CMD_USB_PD_USE_LFB 0x480
/* Retry limit for the vmbx ready wait */
#define PSP_VMBX_POLLING_LIMIT 20000
/* VBIOS gfl defines */
#define MBOX_READY_MASK 0x80000000
#define MBOX_STATUS_MASK 0x0000FFFF
@ -138,7 +141,7 @@ static int psp_v13_0_wait_for_vmbx_ready(struct psp_context *psp)
struct amdgpu_device *adev = psp->adev;
int retry_loop, ret;
for (retry_loop = 0; retry_loop < 70; retry_loop++) {
for (retry_loop = 0; retry_loop < PSP_VMBX_POLLING_LIMIT; retry_loop++) {
/* Wait for the bootloader to signal that it is
ready by setting bit 31 of C2PMSG_33 to 1 */
ret = psp_wait_for(

Some files were not shown because too many files have changed in this diff.