This is the 4.14.202 stable release

-----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAl+Kq8IACgkQONu9yGCS
 aT7JOhAAneMni9sZDK6AfPRZXxrc7EqHHZ4C8wqxKCBLryaPLQjFh40xobAyLThT
 tvE72BrYKTJ3nSvzqntkVsuE8Wurj8HzN7tV+kPUGdp6NAv5bErYrFi7WosqkSyN
 yEAyz0ulxzcbGLKQHe4e0TTastNUayUCUDiUaAT0dfzJUn8ldXHXeOJav/tX+jRQ
 xhv0ejfKpalrHxPVcWvU2s8ZpxHvw5rUdt/vpdnAH/+NT9Qg9VibNx0gl1qaZXrt
 H02h9xaA8CDHB0p+ZNwEQf/urhglMB3ksgJKO6ZvANOHROuvrRm1KVERB/wEdR47
 PFOGBgEEwIfTlkN0b6mTo1W3Lg9hACevepromcVHVrQS2eKnUD+RsuutWmTjla7X
 c2O5kmsAnFu1MBx5b5WSBfdvYA2oyNEQvRKkxgEFgouzwLh1SKDA+Mv69DBiUQjQ
 7cCEbARoVb6P7B0fvT/zF98lBwEwMLZjwGxjK94bQvYqzwr3nNH3SZAl59LsxdFV
 gv19CxIMo3mvppxDDKiMJeAOcsQJYDGWKEvMl5Mx6L2Sn9SjIUR8IG9qiwTJ1I9g
 PTRDgilMAHIFUVwloeR2eTlEgkmGe+fKb55/o4d/3cWfBgtDNLRhROeXsiqhxv49
 xV31K6NBiQO27ckOpOi2NzgeyvOqaD2k5dNK+e3xCKk/7g/GgBg=
 =bZGN
 -----END PGP SIGNATURE-----

Merge tag 'v4.14.202' into v4.14/base

This is the 4.14.202 stable release
Bruce Ashfield 2020-10-20 23:02:44 -04:00
commit a4988999fa
1492 changed files with 12360 additions and 6530 deletions

View File

@ -1524,7 +1524,8 @@ What: /sys/bus/iio/devices/iio:deviceX/in_concentrationX_voc_raw
KernelVersion: 4.3 KernelVersion: 4.3
Contact: linux-iio@vger.kernel.org Contact: linux-iio@vger.kernel.org
Description: Description:
Raw (unscaled no offset etc.) percentage reading of a substance. Raw (unscaled no offset etc.) reading of a substance. Units
after application of scale and offset are percents.
What: /sys/bus/iio/devices/iio:deviceX/in_resistance_raw What: /sys/bus/iio/devices/iio:deviceX/in_resistance_raw
What: /sys/bus/iio/devices/iio:deviceX/in_resistanceX_raw What: /sys/bus/iio/devices/iio:deviceX/in_resistanceX_raw

View File

@ -381,6 +381,7 @@ What: /sys/devices/system/cpu/vulnerabilities
/sys/devices/system/cpu/vulnerabilities/spec_store_bypass /sys/devices/system/cpu/vulnerabilities/spec_store_bypass
/sys/devices/system/cpu/vulnerabilities/l1tf /sys/devices/system/cpu/vulnerabilities/l1tf
/sys/devices/system/cpu/vulnerabilities/mds /sys/devices/system/cpu/vulnerabilities/mds
/sys/devices/system/cpu/vulnerabilities/srbds
/sys/devices/system/cpu/vulnerabilities/tsx_async_abort /sys/devices/system/cpu/vulnerabilities/tsx_async_abort
/sys/devices/system/cpu/vulnerabilities/itlb_multihit /sys/devices/system/cpu/vulnerabilities/itlb_multihit
Date: January 2018 Date: January 2018

View File

@ -14,3 +14,4 @@ are configurable at compile, boot or run time.
mds mds
tsx_async_abort tsx_async_abort
multihit.rst multihit.rst
special-register-buffer-data-sampling.rst

View File

@ -0,0 +1,149 @@
.. SPDX-License-Identifier: GPL-2.0
SRBDS - Special Register Buffer Data Sampling
=============================================
SRBDS is a hardware vulnerability that allows MDS :doc:`mds` techniques to
infer values returned from special register accesses. Special register
accesses are accesses to off-core registers. According to Intel's evaluation,
the special register reads that have a security expectation of privacy are
RDRAND, RDSEED and SGX EGETKEY.
When RDRAND, RDSEED and EGETKEY instructions are used, the data is moved
to the core through the special register mechanism that is susceptible
to MDS attacks.
Affected processors
--------------------
Core models (desktop, mobile, Xeon-E3) that implement RDRAND and/or RDSEED may
be affected.
A processor is affected by SRBDS if its Family_Model and stepping are
in the following list, with the exception of the listed processors
exporting MDS_NO while Intel TSX is available yet not enabled. The
latter class of processors is only affected when Intel TSX is enabled
by software using TSX_CTRL_MSR; otherwise it is not affected.
============= ============ ========
common name Family_Model Stepping
============= ============ ========
IvyBridge 06_3AH All
Haswell 06_3CH All
Haswell_L 06_45H All
Haswell_G 06_46H All
Broadwell_G 06_47H All
Broadwell 06_3DH All
Skylake_L 06_4EH All
Skylake 06_5EH All
Kabylake_L 06_8EH <= 0xC
Kabylake 06_9EH <= 0xD
============= ============ ========
Related CVEs
------------
The following CVE entry is related to this SRBDS issue:
============== ===== =====================================
CVE-2020-0543 SRBDS Special Register Buffer Data Sampling
============== ===== =====================================
Attack scenarios
----------------
An unprivileged user can extract values returned from RDRAND and RDSEED
executed on another core or sibling thread using MDS techniques.
Mitigation mechanism
--------------------
Intel will release microcode updates that modify the RDRAND, RDSEED, and
EGETKEY instructions to overwrite secret special register data in the shared
staging buffer before the secret data can be accessed by another logical
processor.
During execution of the RDRAND, RDSEED, or EGETKEY instructions, off-core
accesses from other logical processors will be delayed until the special
register read is complete and the secret data in the shared staging buffer is
overwritten.
This has three effects on performance:
#. RDRAND, RDSEED, or EGETKEY instructions have higher latency.
#. Executing RDRAND at the same time on multiple logical processors will be
serialized, resulting in an overall reduction in the maximum RDRAND
bandwidth.
#. Executing RDRAND, RDSEED or EGETKEY will delay memory accesses from other
logical processors that miss their core caches, with an impact similar to
legacy locked cache-line-split accesses.
The microcode updates provide an opt-out mechanism (RNGDS_MITG_DIS) to disable
the mitigation for RDRAND and RDSEED instructions executed outside of Intel
Software Guard Extensions (Intel SGX) enclaves. On logical processors that
disable the mitigation using this opt-out mechanism, RDRAND and RDSEED do not
take longer to execute and do not impact performance of sibling logical
processors' memory accesses. The opt-out mechanism does not affect Intel SGX
enclaves (including execution of RDRAND or RDSEED inside an enclave, as well
as EGETKEY execution).
IA32_MCU_OPT_CTRL MSR Definition
--------------------------------
Along with the mitigation for this issue, Intel added a new thread-scope
IA32_MCU_OPT_CTRL MSR (address 0x123). The presence of this MSR and
RNGDS_MITG_DIS (bit 0) is enumerated by CPUID.(EAX=07H,ECX=0).EDX[SRBDS_CTRL =
9]==1. This MSR is introduced through the microcode update.
Setting IA32_MCU_OPT_CTRL[0] (RNGDS_MITG_DIS) to 1 for a logical processor
disables the mitigation for RDRAND and RDSEED executed outside of an Intel SGX
enclave on that logical processor. Opting out of the mitigation for a
particular logical processor does not affect the RDRAND and RDSEED mitigations
for other logical processors.
Note that inside of an Intel SGX enclave, the mitigation is applied regardless
of the value of RNGDS_MITG_DIS.
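In practice, opting a logical processor out therefore means setting bit 0 of
IA32_MCU_OPT_CTRL on that processor. The following is only an illustrative
sketch, not the in-tree implementation (which lives in
arch/x86/kernel/cpu/bugs.c and additionally checks the SRBDS_CTRL CPUID
enumeration before touching the MSR); the address and bit follow the
definitions above::

  #include <linux/types.h>
  #include <asm/msr.h>

  #define MSR_IA32_MCU_OPT_CTRL   0x00000123
  #define RNGDS_MITG_DIS          (1ULL << 0)  /* opt out of RDRAND/RDSEED mitigation */

  /* Sketch only: toggle the opt-out bit on the current logical processor. */
  static void srbds_set_opt_out(bool opt_out)
  {
          u64 val;

          rdmsrl(MSR_IA32_MCU_OPT_CTRL, val);
          if (opt_out)
                  val |= RNGDS_MITG_DIS;
          else
                  val &= ~RNGDS_MITG_DIS;
          wrmsrl(MSR_IA32_MCU_OPT_CTRL, val);
  }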
Mitigation control on the kernel command line
---------------------------------------------
The kernel command line allows control over the SRBDS mitigation at boot time
with the option "srbds=". The option for this is:
============= =============================================================
off           This option disables SRBDS mitigation for RDRAND and RDSEED on
              affected platforms.
============= =============================================================
SRBDS System Information
------------------------
The Linux kernel provides vulnerability status information through sysfs. For
SRBDS this can be accessed by the following sysfs file:
/sys/devices/system/cpu/vulnerabilities/srbds
The possible values contained in this file are:
============================== =============================================
Not affected                   Processor not vulnerable
Vulnerable                     Processor vulnerable and mitigation disabled
Vulnerable: No microcode       Processor vulnerable and microcode is missing
                               mitigation
Mitigation: Microcode          Processor is vulnerable and mitigation is in
                               effect.
Mitigation: TSX disabled       Processor is only vulnerable when TSX is
                               enabled while this system was booted with TSX
                               disabled.
Unknown: Dependent on
hypervisor status              Running on virtual guest processor that is
                               affected but with no way to know if host
                               processor is mitigated or vulnerable.
============================== =============================================
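For example, a small userspace program (a minimal sketch, not part of the
kernel tree) can read this file and print the reported state::

  #include <stdio.h>

  int main(void)
  {
          char status[128];
          FILE *f = fopen("/sys/devices/system/cpu/vulnerabilities/srbds", "r");

          if (!f) {
                  perror("srbds");
                  return 1;
          }
          if (fgets(status, sizeof(status), f))
                  printf("SRBDS status: %s", status);
          fclose(f);
          return 0;
  }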
SRBDS Default mitigation
------------------------
The updated microcode serializes processor access during execution of RDRAND
or RDSEED and ensures that the shared staging buffer is overwritten before it
is released for reuse. Use the "srbds=off" kernel command line option to
disable the mitigation for RDRAND and RDSEED.

View File

@ -4234,6 +4234,26 @@
spia_pedr= spia_pedr=
spia_peddr= spia_peddr=
srbds= [X86,INTEL]
Control the Special Register Buffer Data Sampling
(SRBDS) mitigation.
Certain CPUs are vulnerable to an MDS-like
exploit which can leak bits from the random
number generator.
By default, this issue is mitigated by
microcode. However, the microcode fix can cause
the RDRAND and RDSEED instructions to become
much slower. Among other effects, this will
result in reduced throughput from /dev/urandom.
The microcode mitigation can be disabled with
the following option:
off: Disable mitigation and remove
performance impact to RDRAND and RDSEED
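As a rough sketch of how such a boot option is typically consumed (the
real handler lives in arch/x86/kernel/cpu/bugs.c and also checks whether
the CPU is affected), an early_param() hook records the choice before
the mitigation is selected:

  #include <linux/errno.h>
  #include <linux/init.h>
  #include <linux/string.h>
  #include <linux/types.h>

  static bool srbds_off;

  /* Sketch only: parse "srbds=off" from the kernel command line. */
  static int __init srbds_parse_cmdline(char *str)
  {
          if (!str)
                  return -EINVAL;

          if (!strcmp(str, "off"))
                  srbds_off = true;

          return 0;
  }
  early_param("srbds", srbds_parse_cmdline);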
srcutree.counter_wrap_check [KNL] srcutree.counter_wrap_check [KNL]
Specifies how frequently to check for Specifies how frequently to check for
grace-period sequence counter wrap for the grace-period sequence counter wrap for the

View File

@ -16,6 +16,9 @@ Required properties:
Documentation/devicetree/bindings/graph.txt. This port should be connected Documentation/devicetree/bindings/graph.txt. This port should be connected
to the input port of an attached HDMI or LVDS encoder chip. to the input port of an attached HDMI or LVDS encoder chip.
Optional properties:
- pinctrl-names: Contain "default" and "sleep".
Example: Example:
dpi0: dpi@1401d000 { dpi0: dpi@1401d000 {
@ -26,6 +29,9 @@ dpi0: dpi@1401d000 {
<&mmsys CLK_MM_DPI_ENGINE>, <&mmsys CLK_MM_DPI_ENGINE>,
<&apmixedsys CLK_APMIXED_TVDPLL>; <&apmixedsys CLK_APMIXED_TVDPLL>;
clock-names = "pixel", "engine", "pll"; clock-names = "pixel", "engine", "pll";
pinctrl-names = "default", "sleep";
pinctrl-0 = <&dpi_pin_func>;
pinctrl-1 = <&dpi_pin_idle>;
port { port {
dpi0_out: endpoint { dpi0_out: endpoint {

View File

@ -21,7 +21,7 @@ controller state. The mux controller state is described in
Example: Example:
mux: mux-controller { mux: mux-controller {
compatible = "mux-gpio"; compatible = "gpio-mux";
#mux-control-cells = <0>; #mux-control-cells = <0>;
mux-gpios = <&pioA 0 GPIO_ACTIVE_HIGH>, mux-gpios = <&pioA 0 GPIO_ACTIVE_HIGH>,

View File

@ -14,9 +14,15 @@ Required properties:
- #gpio-cells : Must be 2. The first cell is the pin number and the - #gpio-cells : Must be 2. The first cell is the pin number and the
second cell is used to specify optional parameters (currently unused). second cell is used to specify optional parameters (currently unused).
- AVDD2-supply, DBVDD1-supply, DBVDD2-supply, DBVDD3-supply, CPVDD-supply, - power supplies for the device, as covered in
SPKVDD1-supply, SPKVDD2-supply : power supplies for the device, as covered Documentation/devicetree/bindings/regulator/regulator.txt, depending
in Documentation/devicetree/bindings/regulator/regulator.txt on compatible:
- for wlf,wm1811 and wlf,wm8958:
AVDD1-supply, AVDD2-supply, DBVDD1-supply, DBVDD2-supply, DBVDD3-supply,
DCVDD-supply, CPVDD-supply, SPKVDD1-supply, SPKVDD2-supply
- for wlf,wm8994:
AVDD1-supply, AVDD2-supply, DBVDD-supply, DCVDD-supply, CPVDD-supply,
SPKVDD1-supply, SPKVDD2-supply
Optional properties: Optional properties:
@ -68,11 +74,11 @@ codec: wm8994@1a {
lineout1-se; lineout1-se;
AVDD1-supply = <&regulator>;
AVDD2-supply = <&regulator>; AVDD2-supply = <&regulator>;
CPVDD-supply = <&regulator>; CPVDD-supply = <&regulator>;
DBVDD1-supply = <&regulator>; DBVDD-supply = <&regulator>;
DBVDD2-supply = <&regulator>; DCVDD-supply = <&regulator>;
DBVDD3-supply = <&regulator>;
SPKVDD1-supply = <&regulator>; SPKVDD1-supply = <&regulator>;
SPKVDD2-supply = <&regulator>; SPKVDD2-supply = <&regulator>;
}; };

View File

@ -47,6 +47,8 @@ Optional properties:
from P0 to P1/P2/P3 without delay. from P0 to P1/P2/P3 without delay.
- snps,dis-tx-ipgap-linecheck-quirk: when set, disable u2mac linestate check - snps,dis-tx-ipgap-linecheck-quirk: when set, disable u2mac linestate check
during HS transmit. during HS transmit.
- snps,parkmode-disable-ss-quirk: when set, all SuperSpeed bus instances in
park mode are disabled.
- snps,dis_metastability_quirk: when set, disable metastability workaround. - snps,dis_metastability_quirk: when set, disable metastability workaround.
CAUTION: use only if you are absolutely sure of it. CAUTION: use only if you are absolutely sure of it.
- snps,is-utmi-l1-suspend: true when DWC3 asserts output signal - snps,is-utmi-l1-suspend: true when DWC3 asserts output signal

View File

@ -251,7 +251,7 @@ High-level taskfile hooks
:: ::
void (*qc_prep) (struct ata_queued_cmd *qc); enum ata_completion_errors (*qc_prep) (struct ata_queued_cmd *qc);
int (*qc_issue) (struct ata_queued_cmd *qc); int (*qc_issue) (struct ata_queued_cmd *qc);

View File

@ -277,7 +277,7 @@ unregisters the partitions in the MTD layer.
static void __exit board_cleanup (void) static void __exit board_cleanup (void)
{ {
/* Release resources, unregister device */ /* Release resources, unregister device */
nand_release (board_mtd); nand_release (mtd_to_nand(board_mtd));
/* unmap physical address */ /* unmap physical address */
iounmap(baseaddr); iounmap(baseaddr);

View File

@ -93,13 +93,15 @@ The Amiga protection flags RWEDRWEDHSPARWED are handled as follows:
- R maps to r for user, group and others. On directories, R implies x. - R maps to r for user, group and others. On directories, R implies x.
- If both W and D are allowed, w will be set. - W maps to w.
- E maps to x. - E maps to x.
- H and P are always retained and ignored under Linux. - D is ignored.
- A is always reset when a file is written to. - H, S and P are always retained and ignored under Linux.
- A is cleared when a file is written to.
User id and group id will be used unless set[gu]id are given as mount User id and group id will be used unless set[gu]id are given as mount
options. Since most of the Amiga file systems are single user systems options. Since most of the Amiga file systems are single user systems
@ -111,11 +113,13 @@ Linux -> Amiga:
The Linux rwxrwxrwx file mode is handled as follows: The Linux rwxrwxrwx file mode is handled as follows:
- r permission will set R for user, group and others. - r permission will allow R for user, group and others.
- w permission will set W and D for user, group and others. - w permission will allow W for user, group and others.
- x permission of the user will set E for plain files. - x permission of the user will allow E for plain files.
- D will be allowed for user, group and others.
- All other flags (suid, sgid, ...) are ignored and will - All other flags (suid, sgid, ...) are ignored and will
not be retained. not be retained.

View File

@ -3737,9 +3737,11 @@ EOI was received.
#define KVM_EXIT_HYPERV_SYNIC 1 #define KVM_EXIT_HYPERV_SYNIC 1
#define KVM_EXIT_HYPERV_HCALL 2 #define KVM_EXIT_HYPERV_HCALL 2
__u32 type; __u32 type;
__u32 pad1;
union { union {
struct { struct {
__u32 msr; __u32 msr;
__u32 pad2;
__u64 control; __u64 control;
__u64 evt_page; __u64 evt_page;
__u64 msg_page; __u64 msg_page;

View File

@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0 # SPDX-License-Identifier: GPL-2.0
VERSION = 4 VERSION = 4
PATCHLEVEL = 14 PATCHLEVEL = 14
SUBLEVEL = 182 SUBLEVEL = 202
EXTRAVERSION = EXTRAVERSION =
NAME = Petit Gorille NAME = Petit Gorille
@ -482,7 +482,7 @@ ifeq ($(cc-name),clang)
ifneq ($(CROSS_COMPILE),) ifneq ($(CROSS_COMPILE),)
CLANG_FLAGS += --target=$(notdir $(CROSS_COMPILE:%-=%)) CLANG_FLAGS += --target=$(notdir $(CROSS_COMPILE:%-=%))
GCC_TOOLCHAIN_DIR := $(dir $(shell which $(CROSS_COMPILE)elfedit)) GCC_TOOLCHAIN_DIR := $(dir $(shell which $(CROSS_COMPILE)elfedit))
CLANG_FLAGS += --prefix=$(GCC_TOOLCHAIN_DIR) CLANG_FLAGS += --prefix=$(GCC_TOOLCHAIN_DIR)$(notdir $(CROSS_COMPILE))
GCC_TOOLCHAIN := $(realpath $(GCC_TOOLCHAIN_DIR)/..) GCC_TOOLCHAIN := $(realpath $(GCC_TOOLCHAIN_DIR)/..)
endif endif
ifneq ($(GCC_TOOLCHAIN),) ifneq ($(GCC_TOOLCHAIN),)
@ -542,12 +542,8 @@ KBUILD_MODULES :=
KBUILD_BUILTIN := 1 KBUILD_BUILTIN := 1
# If we have only "make modules", don't compile built-in objects. # If we have only "make modules", don't compile built-in objects.
# When we're building modules with modversions, we need to consider
# the built-in objects during the descend as well, in order to
# make sure the checksums are up to date before we record them.
ifeq ($(MAKECMDGOALS),modules) ifeq ($(MAKECMDGOALS),modules)
KBUILD_BUILTIN := $(if $(CONFIG_MODVERSIONS),1) KBUILD_BUILTIN :=
endif endif
# If we have "make <whatever> modules", compile modules # If we have "make <whatever> modules", compile modules
@ -1249,6 +1245,13 @@ ifdef CONFIG_MODULES
all: modules all: modules
# When we're building modules with modversions, we need to consider
# the built-in objects during the descend as well, in order to
# make sure the checksums are up to date before we record them.
ifdef CONFIG_MODVERSIONS
KBUILD_BUILTIN := 1
endif
# Build modules # Build modules
# #
# A module can be listed more than once in obj-m resulting in # A module can be listed more than once in obj-m resulting in

View File

@ -493,10 +493,10 @@ extern inline void writeq(u64 b, volatile void __iomem *addr)
} }
#endif #endif
#define ioread16be(p) be16_to_cpu(ioread16(p)) #define ioread16be(p) swab16(ioread16(p))
#define ioread32be(p) be32_to_cpu(ioread32(p)) #define ioread32be(p) swab32(ioread32(p))
#define iowrite16be(v,p) iowrite16(cpu_to_be16(v), (p)) #define iowrite16be(v,p) iowrite16(swab16(v), (p))
#define iowrite32be(v,p) iowrite32(cpu_to_be32(v), (p)) #define iowrite32be(v,p) iowrite32(swab32(v), (p))
#define inb_p inb #define inb_p inb
#define inw_p inw #define inw_p inw

View File

@ -30,11 +30,13 @@
* Address valid if: * Address valid if:
* - "addr" doesn't have any high-bits set * - "addr" doesn't have any high-bits set
* - AND "size" doesn't have any high-bits set * - AND "size" doesn't have any high-bits set
* - AND "addr+size" doesn't have any high-bits set * - AND "addr+size-(size != 0)" doesn't have any high-bits set
* - OR we are in kernel mode. * - OR we are in kernel mode.
*/ */
#define __access_ok(addr, size) \ #define __access_ok(addr, size) ({ \
((get_fs().seg & (addr | size | (addr+size))) == 0) unsigned long __ao_a = (addr), __ao_b = (size); \
unsigned long __ao_end = __ao_a + __ao_b - !!__ao_b; \
(get_fs().seg & (__ao_a | __ao_b | __ao_end)) == 0; })
#define access_ok(type, addr, size) \ #define access_ok(type, addr, size) \
({ \ ({ \

View File

@ -79,6 +79,8 @@
arcpct: pct { arcpct: pct {
compatible = "snps,archs-pct"; compatible = "snps,archs-pct";
interrupt-parent = <&cpu_intc>;
interrupts = <20>;
}; };
/* TIMER0 with interrupt for clockevent */ /* TIMER0 with interrupt for clockevent */
@ -161,7 +163,7 @@
reg = <0x8000 0x2000>; reg = <0x8000 0x2000>;
interrupts = <10>; interrupts = <10>;
interrupt-names = "macirq"; interrupt-names = "macirq";
phy-mode = "rgmii"; phy-mode = "rgmii-id";
snps,pbl = <32>; snps,pbl = <32>;
snps,multicast-filter-bins = <256>; snps,multicast-filter-bins = <256>;
clocks = <&gmacclk>; clocks = <&gmacclk>;
@ -177,7 +179,7 @@
#address-cells = <1>; #address-cells = <1>;
#size-cells = <0>; #size-cells = <0>;
compatible = "snps,dwmac-mdio"; compatible = "snps,dwmac-mdio";
phy0: ethernet-phy@0 { phy0: ethernet-phy@0 { /* Micrel KSZ9031 */
reg = <0>; reg = <0>;
ti,rx-internal-delay = <DP83867_RGMIIDCTL_2_00_NS>; ti,rx-internal-delay = <DP83867_RGMIIDCTL_2_00_NS>;
ti,tx-internal-delay = <DP83867_RGMIIDCTL_2_00_NS>; ti,tx-internal-delay = <DP83867_RGMIIDCTL_2_00_NS>;

View File

@ -26,7 +26,7 @@
#define R_ARC_32_PCREL 0x31 #define R_ARC_32_PCREL 0x31
/*to set parameters in the core dumps */ /*to set parameters in the core dumps */
#define ELF_ARCH EM_ARCOMPACT #define ELF_ARCH EM_ARC_INUSE
#define ELF_CLASS ELFCLASS32 #define ELF_CLASS ELFCLASS32
#ifdef CONFIG_CPU_BIG_ENDIAN #ifdef CONFIG_CPU_BIG_ENDIAN

View File

@ -156,7 +156,6 @@ END(EV_Extension)
tracesys: tracesys:
; save EFA in case tracer wants the PC of traced task ; save EFA in case tracer wants the PC of traced task
; using ERET won't work since next-PC has already committed ; using ERET won't work since next-PC has already committed
lr r12, [efa]
GET_CURR_TASK_FIELD_PTR TASK_THREAD, r11 GET_CURR_TASK_FIELD_PTR TASK_THREAD, r11
st r12, [r11, THREAD_FAULT_ADDR] ; thread.fault_address st r12, [r11, THREAD_FAULT_ADDR] ; thread.fault_address
@ -199,15 +198,9 @@ tracesys_exit:
; Breakpoint TRAP ; Breakpoint TRAP
; --------------------------------------------- ; ---------------------------------------------
trap_with_param: trap_with_param:
mov r0, r12 ; EFA in case ptracer/gdb wants stop_pc
; stop_pc info by gdb needs this info
lr r0, [efa]
mov r1, sp mov r1, sp
; Now that we have read EFA, it is safe to do "fake" rtie
; and get out of CPU exception mode
FAKE_RET_FROM_EXCPN
; Save callee regs in case gdb wants to have a look ; Save callee regs in case gdb wants to have a look
; SP will grow up by size of CALLEE Reg-File ; SP will grow up by size of CALLEE Reg-File
; NOTE: clobbers r12 ; NOTE: clobbers r12
@ -234,6 +227,10 @@ ENTRY(EV_Trap)
EXCEPTION_PROLOGUE EXCEPTION_PROLOGUE
lr r12, [efa]
FAKE_RET_FROM_EXCPN
;============ TRAP 1 :breakpoints ;============ TRAP 1 :breakpoints
; Check ECR for trap with arg (PROLOGUE ensures r9 has ECR) ; Check ECR for trap with arg (PROLOGUE ensures r9 has ECR)
bmsk.f 0, r9, 7 bmsk.f 0, r9, 7
@ -241,9 +238,6 @@ ENTRY(EV_Trap)
;============ TRAP (no param): syscall top level ;============ TRAP (no param): syscall top level
; First return from Exception to pure K mode (Exception/IRQs renabled)
FAKE_RET_FROM_EXCPN
; If syscall tracing ongoing, invoke pre-post-hooks ; If syscall tracing ongoing, invoke pre-post-hooks
GET_CURR_THR_INFO_FLAGS r10 GET_CURR_THR_INFO_FLAGS r10
btst r10, TIF_SYSCALL_TRACE btst r10, TIF_SYSCALL_TRACE

View File

@ -15,6 +15,7 @@
#include <linux/clocksource.h> #include <linux/clocksource.h>
#include <linux/console.h> #include <linux/console.h>
#include <linux/module.h> #include <linux/module.h>
#include <linux/sizes.h>
#include <linux/cpu.h> #include <linux/cpu.h>
#include <linux/of_fdt.h> #include <linux/of_fdt.h>
#include <linux/of.h> #include <linux/of.h>
@ -355,12 +356,12 @@ static void arc_chk_core_config(void)
if ((unsigned int)__arc_dccm_base != cpu->dccm.base_addr) if ((unsigned int)__arc_dccm_base != cpu->dccm.base_addr)
panic("Linux built with incorrect DCCM Base address\n"); panic("Linux built with incorrect DCCM Base address\n");
if (CONFIG_ARC_DCCM_SZ != cpu->dccm.sz) if (CONFIG_ARC_DCCM_SZ * SZ_1K != cpu->dccm.sz)
panic("Linux built with incorrect DCCM Size\n"); panic("Linux built with incorrect DCCM Size\n");
#endif #endif
#ifdef CONFIG_ARC_HAS_ICCM #ifdef CONFIG_ARC_HAS_ICCM
if (CONFIG_ARC_ICCM_SZ != cpu->iccm.sz) if (CONFIG_ARC_ICCM_SZ * SZ_1K != cpu->iccm.sz)
panic("Linux built with incorrect ICCM Size\n"); panic("Linux built with incorrect ICCM Size\n");
#endif #endif

View File

@ -6,6 +6,7 @@
menuconfig ARC_PLAT_EZNPS menuconfig ARC_PLAT_EZNPS
bool "\"EZchip\" ARC dev platform" bool "\"EZchip\" ARC dev platform"
depends on ISA_ARCOMPACT
select CPU_BIG_ENDIAN select CPU_BIG_ENDIAN
select CLKSRC_NPS if !PHYS_ADDR_T_64BIT select CLKSRC_NPS if !PHYS_ADDR_T_64BIT
select EZNPS_GIC select EZNPS_GIC

View File

@ -43,7 +43,6 @@
#define CTOP_AUX_DPC (CTOP_AUX_BASE + 0x02C) #define CTOP_AUX_DPC (CTOP_AUX_BASE + 0x02C)
#define CTOP_AUX_LPC (CTOP_AUX_BASE + 0x030) #define CTOP_AUX_LPC (CTOP_AUX_BASE + 0x030)
#define CTOP_AUX_EFLAGS (CTOP_AUX_BASE + 0x080) #define CTOP_AUX_EFLAGS (CTOP_AUX_BASE + 0x080)
#define CTOP_AUX_IACK (CTOP_AUX_BASE + 0x088)
#define CTOP_AUX_GPA1 (CTOP_AUX_BASE + 0x08C) #define CTOP_AUX_GPA1 (CTOP_AUX_BASE + 0x08C)
#define CTOP_AUX_UDMC (CTOP_AUX_BASE + 0x300) #define CTOP_AUX_UDMC (CTOP_AUX_BASE + 0x300)

View File

@ -249,10 +249,10 @@
status = "disabled"; status = "disabled";
}; };
mailbox: mailbox@25000 { mailbox: mailbox@25c00 {
compatible = "brcm,iproc-fa2-mbox"; compatible = "brcm,iproc-fa2-mbox";
reg = <0x25000 0x445>; reg = <0x25c00 0x400>;
interrupts = <GIC_SPI 150 IRQ_TYPE_LEVEL_HIGH>; interrupts = <GIC_SPI 151 IRQ_TYPE_LEVEL_HIGH>;
#mbox-cells = <1>; #mbox-cells = <1>;
brcm,rx-status-len = <32>; brcm,rx-status-len = <32>;
brcm,use-bcm-hdr; brcm,use-bcm-hdr;

View File

@ -25,7 +25,7 @@
leds { leds {
act { act {
gpios = <&gpio 47 GPIO_ACTIVE_HIGH>; gpios = <&gpio 47 GPIO_ACTIVE_LOW>;
}; };
}; };

View File

@ -426,7 +426,7 @@
}; };
spi@18029200 { spi@18029200 {
compatible = "brcm,spi-bcm-qspi", "brcm,spi-nsp-qspi"; compatible = "brcm,spi-nsp-qspi", "brcm,spi-bcm-qspi";
reg = <0x18029200 0x184>, reg = <0x18029200 0x184>,
<0x18029000 0x124>, <0x18029000 0x124>,
<0x1811b408 0x004>, <0x1811b408 0x004>,

View File

@ -65,13 +65,6 @@
}; };
}; };
&clks {
assigned-clocks = <&clks IMX6QDL_CLK_LDB_DI0_SEL>,
<&clks IMX6QDL_CLK_LDB_DI1_SEL>;
assigned-clock-parents = <&clks IMX6QDL_CLK_PLL3_USB_OTG>,
<&clks IMX6QDL_CLK_PLL3_USB_OTG>;
};
&ldb { &ldb {
status = "okay"; status = "okay";

View File

@ -65,13 +65,6 @@
}; };
}; };
&clks {
assigned-clocks = <&clks IMX6QDL_CLK_LDB_DI0_SEL>,
<&clks IMX6QDL_CLK_LDB_DI1_SEL>;
assigned-clock-parents = <&clks IMX6QDL_CLK_PLL3_USB_OTG>,
<&clks IMX6QDL_CLK_PLL3_USB_OTG>;
};
&ldb { &ldb {
status = "okay"; status = "okay";

View File

@ -53,17 +53,6 @@
}; };
}; };
&clks {
assigned-clocks = <&clks IMX6QDL_CLK_LDB_DI0_SEL>,
<&clks IMX6QDL_CLK_LDB_DI1_SEL>,
<&clks IMX6QDL_CLK_IPU1_DI0_PRE_SEL>,
<&clks IMX6QDL_CLK_IPU2_DI0_PRE_SEL>;
assigned-clock-parents = <&clks IMX6QDL_CLK_PLL5_VIDEO_DIV>,
<&clks IMX6QDL_CLK_PLL5_VIDEO_DIV>,
<&clks IMX6QDL_CLK_PLL2_PFD2_396M>,
<&clks IMX6QDL_CLK_PLL2_PFD2_396M>;
};
&ldb { &ldb {
fsl,dual-channel; fsl,dual-channel;
status = "okay"; status = "okay";

View File

@ -92,6 +92,56 @@
mux-int-port = <1>; mux-int-port = <1>;
mux-ext-port = <4>; mux-ext-port = <4>;
}; };
aliases {
mdio-gpio0 = &mdio0;
};
mdio0: mdio-gpio {
compatible = "virtual,mdio-gpio";
gpios = <&gpio2 5 GPIO_ACTIVE_HIGH>, /* mdc */
<&gpio2 7 GPIO_ACTIVE_HIGH>; /* mdio */
#address-cells = <1>;
#size-cells = <0>;
switch@0 {
compatible = "marvell,mv88e6085"; /* 88e6240*/
#address-cells = <1>;
#size-cells = <0>;
reg = <0>;
switch_ports: ports {
#address-cells = <1>;
#size-cells = <0>;
};
mdio {
#address-cells = <1>;
#size-cells = <0>;
switchphy0: switchphy@0 {
reg = <0>;
};
switchphy1: switchphy@1 {
reg = <1>;
};
switchphy2: switchphy@2 {
reg = <2>;
};
switchphy3: switchphy@3 {
reg = <3>;
};
switchphy4: switchphy@4 {
reg = <4>;
};
};
};
};
}; };
&ecspi5 { &ecspi5 {
@ -326,3 +376,30 @@
tcxo-clock-frequency = <26000000>; tcxo-clock-frequency = <26000000>;
}; };
}; };
&pcie {
/* Synopsys, Inc. Device */
pci_root: root@0,0 {
compatible = "pci16c3,abcd";
reg = <0x00000000 0 0 0 0>;
#address-cells = <3>;
#size-cells = <2>;
#interrupt-cells = <1>;
};
};
&clks {
assigned-clocks = <&clks IMX6QDL_CLK_LDB_DI0_SEL>,
<&clks IMX6QDL_CLK_LDB_DI1_SEL>,
<&clks IMX6QDL_CLK_IPU1_DI0_PRE_SEL>,
<&clks IMX6QDL_CLK_IPU1_DI1_PRE_SEL>,
<&clks IMX6QDL_CLK_IPU2_DI0_PRE_SEL>,
<&clks IMX6QDL_CLK_IPU2_DI1_PRE_SEL>;
assigned-clock-parents = <&clks IMX6QDL_CLK_PLL5_VIDEO_DIV>,
<&clks IMX6QDL_CLK_PLL5_VIDEO_DIV>,
<&clks IMX6QDL_CLK_PLL2_PFD0_352M>,
<&clks IMX6QDL_CLK_PLL2_PFD0_352M>,
<&clks IMX6QDL_CLK_PLL2_PFD0_352M>,
<&clks IMX6QDL_CLK_PLL2_PFD0_352M>;
};

View File

@ -16,8 +16,10 @@
#interrupt-cells = <2>; #interrupt-cells = <2>;
#address-cells = <1>; #address-cells = <1>;
#size-cells = <0>; #size-cells = <0>;
spi-max-frequency = <3000000>; spi-max-frequency = <9600000>;
spi-cs-high; spi-cs-high;
spi-cpol;
spi-cpha;
cpcap_adc: adc { cpcap_adc: adc {
compatible = "motorola,mapphone-cpcap-adc"; compatible = "motorola,mapphone-cpcap-adc";

View File

@ -84,7 +84,7 @@
#address-cells = <1>; #address-cells = <1>;
#size-cells = <0>; #size-cells = <0>;
phy: phy@0 { phy: ethernet-phy@0 {
compatible = "ethernet-phy-id1234.d400", "ethernet-phy-ieee802.3-c22"; compatible = "ethernet-phy-id1234.d400", "ethernet-phy-ieee802.3-c22";
reg = <0>; reg = <0>;
clocks = <&cru SCLK_MAC_PHY>; clocks = <&cru SCLK_MAC_PHY>;

View File

@ -950,7 +950,7 @@
}; };
}; };
spi-0 { spi0 {
spi0_clk: spi0-clk { spi0_clk: spi0-clk {
rockchip,pins = <0 9 RK_FUNC_2 &pcfg_pull_up>; rockchip,pins = <0 9 RK_FUNC_2 &pcfg_pull_up>;
}; };
@ -968,7 +968,7 @@
}; };
}; };
spi-1 { spi1 {
spi1_clk: spi1-clk { spi1_clk: spi1-clk {
rockchip,pins = <0 23 RK_FUNC_2 &pcfg_pull_up>; rockchip,pins = <0 23 RK_FUNC_2 &pcfg_pull_up>;
}; };

View File

@ -706,7 +706,7 @@
}; };
}; };
L2: l2-cache@fffef000 { L2: cache-controller@fffef000 {
compatible = "arm,pl310-cache"; compatible = "arm,pl310-cache";
reg = <0xfffef000 0x1000>; reg = <0xfffef000 0x1000>;
interrupts = <0 38 0x04>; interrupts = <0 38 0x04>;

View File

@ -606,7 +606,7 @@
reg = <0xffcfb100 0x80>; reg = <0xffcfb100 0x80>;
}; };
L2: l2-cache@fffff000 { L2: cache-controller@fffff000 {
compatible = "arm,pl310-cache"; compatible = "arm,pl310-cache";
reg = <0xfffff000 0x1000>; reg = <0xfffff000 0x1000>;
interrupts = <0 18 IRQ_TYPE_LEVEL_HIGH>; interrupts = <0 18 IRQ_TYPE_LEVEL_HIGH>;
@ -779,7 +779,7 @@
timer3: timer3@ffd00100 { timer3: timer3@ffd00100 {
compatible = "snps,dw-apb-timer"; compatible = "snps,dw-apb-timer";
interrupts = <0 118 IRQ_TYPE_LEVEL_HIGH>; interrupts = <0 118 IRQ_TYPE_LEVEL_HIGH>;
reg = <0xffd01000 0x100>; reg = <0xffd00100 0x100>;
clocks = <&l4_sys_free_clk>; clocks = <&l4_sys_free_clk>;
clock-names = "timer"; clock-names = "timer";
}; };

View File

@ -527,7 +527,7 @@
}; };
ocotp: ocotp@400a5000 { ocotp: ocotp@400a5000 {
compatible = "fsl,vf610-ocotp"; compatible = "fsl,vf610-ocotp", "syscon";
reg = <0x400a5000 0x1000>; reg = <0x400a5000 0x1000>;
clocks = <&clks VF610_CLK_OCOTP>; clocks = <&clks VF610_CLK_OCOTP>;
}; };

View File

@ -21,11 +21,11 @@
#endif #endif
#include <asm/ptrace.h> #include <asm/ptrace.h>
#include <asm/domain.h>
#include <asm/opcodes-virt.h> #include <asm/opcodes-virt.h>
#include <asm/asm-offsets.h> #include <asm/asm-offsets.h>
#include <asm/page.h> #include <asm/page.h>
#include <asm/thread_info.h> #include <asm/thread_info.h>
#include <asm/uaccess-asm.h>
#define IOMEM(x) (x) #define IOMEM(x) (x)
@ -374,9 +374,9 @@ THUMB( orr \reg , \reg , #PSR_T_BIT )
.macro usraccoff, instr, reg, ptr, inc, off, cond, abort, t=TUSER() .macro usraccoff, instr, reg, ptr, inc, off, cond, abort, t=TUSER()
9999: 9999:
.if \inc == 1 .if \inc == 1
\instr\cond\()b\()\t\().w \reg, [\ptr, #\off] \instr\()b\t\cond\().w \reg, [\ptr, #\off]
.elseif \inc == 4 .elseif \inc == 4
\instr\cond\()\t\().w \reg, [\ptr, #\off] \instr\t\cond\().w \reg, [\ptr, #\off]
.else .else
.error "Unsupported inc macro argument" .error "Unsupported inc macro argument"
.endif .endif
@ -415,9 +415,9 @@ THUMB( orr \reg , \reg , #PSR_T_BIT )
.rept \rept .rept \rept
9999: 9999:
.if \inc == 1 .if \inc == 1
\instr\cond\()b\()\t \reg, [\ptr], #\inc \instr\()b\t\cond \reg, [\ptr], #\inc
.elseif \inc == 4 .elseif \inc == 4
\instr\cond\()\t \reg, [\ptr], #\inc \instr\t\cond \reg, [\ptr], #\inc
.else .else
.error "Unsupported inc macro argument" .error "Unsupported inc macro argument"
.endif .endif
@ -447,79 +447,6 @@ THUMB( orr \reg , \reg , #PSR_T_BIT )
.size \name , . - \name .size \name , . - \name
.endm .endm
.macro csdb
#ifdef CONFIG_THUMB2_KERNEL
.inst.w 0xf3af8014
#else
.inst 0xe320f014
#endif
.endm
.macro check_uaccess, addr:req, size:req, limit:req, tmp:req, bad:req
#ifndef CONFIG_CPU_USE_DOMAINS
adds \tmp, \addr, #\size - 1
sbcccs \tmp, \tmp, \limit
bcs \bad
#ifdef CONFIG_CPU_SPECTRE
movcs \addr, #0
csdb
#endif
#endif
.endm
.macro uaccess_mask_range_ptr, addr:req, size:req, limit:req, tmp:req
#ifdef CONFIG_CPU_SPECTRE
sub \tmp, \limit, #1
subs \tmp, \tmp, \addr @ tmp = limit - 1 - addr
addhs \tmp, \tmp, #1 @ if (tmp >= 0) {
subhss \tmp, \tmp, \size @ tmp = limit - (addr + size) }
movlo \addr, #0 @ if (tmp < 0) addr = NULL
csdb
#endif
.endm
.macro uaccess_disable, tmp, isb=1
#ifdef CONFIG_CPU_SW_DOMAIN_PAN
/*
* Whenever we re-enter userspace, the domains should always be
* set appropriately.
*/
mov \tmp, #DACR_UACCESS_DISABLE
mcr p15, 0, \tmp, c3, c0, 0 @ Set domain register
.if \isb
instr_sync
.endif
#endif
.endm
.macro uaccess_enable, tmp, isb=1
#ifdef CONFIG_CPU_SW_DOMAIN_PAN
/*
* Whenever we re-enter userspace, the domains should always be
* set appropriately.
*/
mov \tmp, #DACR_UACCESS_ENABLE
mcr p15, 0, \tmp, c3, c0, 0
.if \isb
instr_sync
.endif
#endif
.endm
.macro uaccess_save, tmp
#ifdef CONFIG_CPU_SW_DOMAIN_PAN
mrc p15, 0, \tmp, c3, c0, 0
str \tmp, [sp, #SVC_DACR]
#endif
.endm
.macro uaccess_restore
#ifdef CONFIG_CPU_SW_DOMAIN_PAN
ldr r0, [sp, #SVC_DACR]
mcr p15, 0, r0, c3, c0, 0
#endif
.endm
.irp c,,eq,ne,cs,cc,mi,pl,vs,vc,hi,ls,ge,lt,gt,le,hs,lo .irp c,,eq,ne,cs,cc,mi,pl,vs,vc,hi,ls,ge,lt,gt,le,hs,lo
.macro ret\c, reg .macro ret\c, reg
#if __LINUX_ARM_ARCH__ < 6 #if __LINUX_ARM_ARCH__ < 6

View File

@ -16,6 +16,8 @@
#ifndef _ASM_ARM_PERCPU_H_ #ifndef _ASM_ARM_PERCPU_H_
#define _ASM_ARM_PERCPU_H_ #define _ASM_ARM_PERCPU_H_
#include <asm/thread_info.h>
/* /*
* Same as asm-generic/percpu.h, except that we store the per cpu offset * Same as asm-generic/percpu.h, except that we store the per cpu offset
* in the TPIDRPRW. TPIDRPRW only exists on V6K and V7 * in the TPIDRPRW. TPIDRPRW only exists on V6K and V7

View File

@ -0,0 +1,117 @@
/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef __ASM_UACCESS_ASM_H__
#define __ASM_UACCESS_ASM_H__
#include <asm/asm-offsets.h>
#include <asm/domain.h>
#include <asm/memory.h>
#include <asm/thread_info.h>
.macro csdb
#ifdef CONFIG_THUMB2_KERNEL
.inst.w 0xf3af8014
#else
.inst 0xe320f014
#endif
.endm
.macro check_uaccess, addr:req, size:req, limit:req, tmp:req, bad:req
#ifndef CONFIG_CPU_USE_DOMAINS
adds \tmp, \addr, #\size - 1
sbcscc \tmp, \tmp, \limit
bcs \bad
#ifdef CONFIG_CPU_SPECTRE
movcs \addr, #0
csdb
#endif
#endif
.endm
.macro uaccess_mask_range_ptr, addr:req, size:req, limit:req, tmp:req
#ifdef CONFIG_CPU_SPECTRE
sub \tmp, \limit, #1
subs \tmp, \tmp, \addr @ tmp = limit - 1 - addr
addhs \tmp, \tmp, #1 @ if (tmp >= 0) {
subshs \tmp, \tmp, \size @ tmp = limit - (addr + size) }
movlo \addr, #0 @ if (tmp < 0) addr = NULL
csdb
#endif
.endm
.macro uaccess_disable, tmp, isb=1
#ifdef CONFIG_CPU_SW_DOMAIN_PAN
/*
* Whenever we re-enter userspace, the domains should always be
* set appropriately.
*/
mov \tmp, #DACR_UACCESS_DISABLE
mcr p15, 0, \tmp, c3, c0, 0 @ Set domain register
.if \isb
instr_sync
.endif
#endif
.endm
.macro uaccess_enable, tmp, isb=1
#ifdef CONFIG_CPU_SW_DOMAIN_PAN
/*
* Whenever we re-enter userspace, the domains should always be
* set appropriately.
*/
mov \tmp, #DACR_UACCESS_ENABLE
mcr p15, 0, \tmp, c3, c0, 0
.if \isb
instr_sync
.endif
#endif
.endm
#if defined(CONFIG_CPU_SW_DOMAIN_PAN) || defined(CONFIG_CPU_USE_DOMAINS)
#define DACR(x...) x
#else
#define DACR(x...)
#endif
/*
* Save the address limit on entry to a privileged exception.
*
* If we are using the DACR for kernel access by the user accessors
* (CONFIG_CPU_USE_DOMAINS=y), always reset the DACR kernel domain
* back to client mode, whether or not \disable is set.
*
* If we are using SW PAN, set the DACR user domain to no access
* if \disable is set.
*/
.macro uaccess_entry, tsk, tmp0, tmp1, tmp2, disable
ldr \tmp1, [\tsk, #TI_ADDR_LIMIT]
mov \tmp2, #TASK_SIZE
str \tmp2, [\tsk, #TI_ADDR_LIMIT]
DACR( mrc p15, 0, \tmp0, c3, c0, 0)
DACR( str \tmp0, [sp, #SVC_DACR])
str \tmp1, [sp, #SVC_ADDR_LIMIT]
.if \disable && IS_ENABLED(CONFIG_CPU_SW_DOMAIN_PAN)
/* kernel=client, user=no access */
mov \tmp2, #DACR_UACCESS_DISABLE
mcr p15, 0, \tmp2, c3, c0, 0
instr_sync
.elseif IS_ENABLED(CONFIG_CPU_USE_DOMAINS)
/* kernel=client */
bic \tmp2, \tmp0, #domain_mask(DOMAIN_KERNEL)
orr \tmp2, \tmp2, #domain_val(DOMAIN_KERNEL, DOMAIN_CLIENT)
mcr p15, 0, \tmp2, c3, c0, 0
instr_sync
.endif
.endm
/* Restore the user access state previously saved by uaccess_entry */
.macro uaccess_exit, tsk, tmp0, tmp1
ldr \tmp1, [sp, #SVC_ADDR_LIMIT]
DACR( ldr \tmp0, [sp, #SVC_DACR])
str \tmp1, [\tsk, #TI_ADDR_LIMIT]
DACR( mcr p15, 0, \tmp0, c3, c0, 0)
.endm
#undef DACR
#endif /* __ASM_UACCESS_ASM_H__ */

View File

@ -29,13 +29,13 @@
ldr \tmp, =elf_hwcap @ may not have MVFR regs ldr \tmp, =elf_hwcap @ may not have MVFR regs
ldr \tmp, [\tmp, #0] ldr \tmp, [\tmp, #0]
tst \tmp, #HWCAP_VFPD32 tst \tmp, #HWCAP_VFPD32
ldcnel p11, cr0, [\base],#32*4 @ FLDMIAD \base!, {d16-d31} ldclne p11, cr0, [\base],#32*4 @ FLDMIAD \base!, {d16-d31}
addeq \base, \base, #32*4 @ step over unused register space addeq \base, \base, #32*4 @ step over unused register space
#else #else
VFPFMRX \tmp, MVFR0 @ Media and VFP Feature Register 0 VFPFMRX \tmp, MVFR0 @ Media and VFP Feature Register 0
and \tmp, \tmp, #MVFR0_A_SIMD_MASK @ A_SIMD field and \tmp, \tmp, #MVFR0_A_SIMD_MASK @ A_SIMD field
cmp \tmp, #2 @ 32 x 64bit registers? cmp \tmp, #2 @ 32 x 64bit registers?
ldceql p11, cr0, [\base],#32*4 @ FLDMIAD \base!, {d16-d31} ldcleq p11, cr0, [\base],#32*4 @ FLDMIAD \base!, {d16-d31}
addne \base, \base, #32*4 @ step over unused register space addne \base, \base, #32*4 @ step over unused register space
#endif #endif
#endif #endif
@ -53,13 +53,13 @@
ldr \tmp, =elf_hwcap @ may not have MVFR regs ldr \tmp, =elf_hwcap @ may not have MVFR regs
ldr \tmp, [\tmp, #0] ldr \tmp, [\tmp, #0]
tst \tmp, #HWCAP_VFPD32 tst \tmp, #HWCAP_VFPD32
stcnel p11, cr0, [\base],#32*4 @ FSTMIAD \base!, {d16-d31} stclne p11, cr0, [\base],#32*4 @ FSTMIAD \base!, {d16-d31}
addeq \base, \base, #32*4 @ step over unused register space addeq \base, \base, #32*4 @ step over unused register space
#else #else
VFPFMRX \tmp, MVFR0 @ Media and VFP Feature Register 0 VFPFMRX \tmp, MVFR0 @ Media and VFP Feature Register 0
and \tmp, \tmp, #MVFR0_A_SIMD_MASK @ A_SIMD field and \tmp, \tmp, #MVFR0_A_SIMD_MASK @ A_SIMD field
cmp \tmp, #2 @ 32 x 64bit registers? cmp \tmp, #2 @ 32 x 64bit registers?
stceql p11, cr0, [\base],#32*4 @ FSTMIAD \base!, {d16-d31} stcleq p11, cr0, [\base],#32*4 @ FSTMIAD \base!, {d16-d31}
addne \base, \base, #32*4 @ step over unused register space addne \base, \base, #32*4 @ step over unused register space
#endif #endif
#endif #endif

View File

@ -30,6 +30,7 @@
#include <asm/unistd.h> #include <asm/unistd.h>
#include <asm/tls.h> #include <asm/tls.h>
#include <asm/system_info.h> #include <asm/system_info.h>
#include <asm/uaccess-asm.h>
#include "entry-header.S" #include "entry-header.S"
#include <asm/entry-macro-multi.S> #include <asm/entry-macro-multi.S>
@ -186,15 +187,7 @@ ENDPROC(__und_invalid)
stmia r7, {r2 - r6} stmia r7, {r2 - r6}
get_thread_info tsk get_thread_info tsk
ldr r0, [tsk, #TI_ADDR_LIMIT] uaccess_entry tsk, r0, r1, r2, \uaccess
mov r1, #TASK_SIZE
str r1, [tsk, #TI_ADDR_LIMIT]
str r0, [sp, #SVC_ADDR_LIMIT]
uaccess_save r0
.if \uaccess
uaccess_disable r0
.endif
.if \trace .if \trace
#ifdef CONFIG_TRACE_IRQFLAGS #ifdef CONFIG_TRACE_IRQFLAGS

View File

@ -6,6 +6,7 @@
#include <asm/asm-offsets.h> #include <asm/asm-offsets.h>
#include <asm/errno.h> #include <asm/errno.h>
#include <asm/thread_info.h> #include <asm/thread_info.h>
#include <asm/uaccess-asm.h>
#include <asm/v7m.h> #include <asm/v7m.h>
@ Bad Abort numbers @ Bad Abort numbers
@ -217,9 +218,7 @@
blne trace_hardirqs_off blne trace_hardirqs_off
#endif #endif
.endif .endif
ldr r1, [sp, #SVC_ADDR_LIMIT] uaccess_exit tsk, r0, r1
uaccess_restore
str r1, [tsk, #TI_ADDR_LIMIT]
#ifndef CONFIG_THUMB2_KERNEL #ifndef CONFIG_THUMB2_KERNEL
@ ARM mode SVC restore @ ARM mode SVC restore
@ -263,9 +262,7 @@
@ on the stack remains correct). @ on the stack remains correct).
@ @
.macro svc_exit_via_fiq .macro svc_exit_via_fiq
ldr r1, [sp, #SVC_ADDR_LIMIT] uaccess_exit tsk, r0, r1
uaccess_restore
str r1, [tsk, #TI_ADDR_LIMIT]
#ifndef CONFIG_THUMB2_KERNEL #ifndef CONFIG_THUMB2_KERNEL
@ ARM mode restore @ ARM mode restore
mov r0, sp mov r0, sp

View File

@ -101,6 +101,7 @@ __mmap_switched:
str r2, [r6] @ Save atags pointer str r2, [r6] @ Save atags pointer
cmp r7, #0 cmp r7, #0
strne r0, [r7] @ Save control register values strne r0, [r7] @ Save control register values
mov lr, #0
b start_kernel b start_kernel
ENDPROC(__mmap_switched) ENDPROC(__mmap_switched)

View File

@ -688,6 +688,12 @@ static void disable_single_step(struct perf_event *bp)
arch_install_hw_breakpoint(bp); arch_install_hw_breakpoint(bp);
} }
static int watchpoint_fault_on_uaccess(struct pt_regs *regs,
struct arch_hw_breakpoint *info)
{
return !user_mode(regs) && info->ctrl.privilege == ARM_BREAKPOINT_USER;
}
static void watchpoint_handler(unsigned long addr, unsigned int fsr, static void watchpoint_handler(unsigned long addr, unsigned int fsr,
struct pt_regs *regs) struct pt_regs *regs)
{ {
@ -747,16 +753,27 @@ static void watchpoint_handler(unsigned long addr, unsigned int fsr,
} }
pr_debug("watchpoint fired: address = 0x%x\n", info->trigger); pr_debug("watchpoint fired: address = 0x%x\n", info->trigger);
/*
* If we triggered a user watchpoint from a uaccess routine,
* then handle the stepping ourselves since userspace really
* can't help us with this.
*/
if (watchpoint_fault_on_uaccess(regs, info))
goto step;
perf_bp_event(wp, regs); perf_bp_event(wp, regs);
/* /*
* If no overflow handler is present, insert a temporary * Defer stepping to the overflow handler if one is installed.
* mismatch breakpoint so we can single-step over the * Otherwise, insert a temporary mismatch breakpoint so that
* watchpoint trigger. * we can single-step over the watchpoint trigger.
*/ */
if (is_default_overflow_handler(wp)) if (!is_default_overflow_handler(wp))
enable_single_step(wp, instruction_pointer(regs)); goto unlock;
step:
enable_single_step(wp, instruction_pointer(regs));
unlock: unlock:
rcu_read_unlock(); rcu_read_unlock();
} }

View File

@ -228,8 +228,8 @@ static struct undef_hook arm_break_hook = {
}; };
static struct undef_hook thumb_break_hook = { static struct undef_hook thumb_break_hook = {
.instr_mask = 0xffff, .instr_mask = 0xffffffff,
.instr_val = 0xde01, .instr_val = 0x0000de01,
.cpsr_mask = PSR_T_BIT, .cpsr_mask = PSR_T_BIT,
.cpsr_val = PSR_T_BIT, .cpsr_val = PSR_T_BIT,
.fn = break_trap, .fn = break_trap,

View File

@ -20,6 +20,19 @@
* A simple function epilogue looks like this: * A simple function epilogue looks like this:
* ldm sp, {fp, sp, pc} * ldm sp, {fp, sp, pc}
* *
* When compiled with clang, pc and sp are not pushed. A simple function
* prologue looks like this when built with clang:
*
* stmdb {..., fp, lr}
* add fp, sp, #x
* sub sp, sp, #y
*
* A simple function epilogue looks like this when built with clang:
*
* sub sp, fp, #x
* ldm {..., fp, pc}
*
*
* Note that with framepointer enabled, even the leaf functions have the same * Note that with framepointer enabled, even the leaf functions have the same
* prologue and epilogue, therefore we can ignore the LR value in this case. * prologue and epilogue, therefore we can ignore the LR value in this case.
*/ */
@ -32,6 +45,16 @@ int notrace unwind_frame(struct stackframe *frame)
low = frame->sp; low = frame->sp;
high = ALIGN(low, THREAD_SIZE); high = ALIGN(low, THREAD_SIZE);
#ifdef CONFIG_CC_IS_CLANG
/* check current frame pointer is within bounds */
if (fp < low + 4 || fp > high - 4)
return -EINVAL;
frame->sp = frame->fp;
frame->fp = *(unsigned long *)(fp);
frame->pc = frame->lr;
frame->lr = *(unsigned long *)(fp + 4);
#else
/* check current frame pointer is within bounds */ /* check current frame pointer is within bounds */
if (fp < low + 12 || fp > high - 4) if (fp < low + 12 || fp > high - 4)
return -EINVAL; return -EINVAL;
@ -40,6 +63,7 @@ int notrace unwind_frame(struct stackframe *frame)
frame->fp = *(unsigned long *)(fp - 12); frame->fp = *(unsigned long *)(fp - 12);
frame->sp = *(unsigned long *)(fp - 8); frame->sp = *(unsigned long *)(fp - 8);
frame->pc = *(unsigned long *)(fp - 4); frame->pc = *(unsigned long *)(fp - 4);
#endif
return 0; return 0;
} }

View File

@ -7,7 +7,7 @@
ENTRY( \name ) ENTRY( \name )
UNWIND( .fnstart ) UNWIND( .fnstart )
ands ip, r1, #3 ands ip, r1, #3
strneb r1, [ip] @ assert word-aligned strbne r1, [ip] @ assert word-aligned
mov r2, #1 mov r2, #1
and r3, r0, #31 @ Get bit offset and r3, r0, #31 @ Get bit offset
mov r0, r0, lsr #5 mov r0, r0, lsr #5
@ -32,7 +32,7 @@ ENDPROC(\name )
ENTRY( \name ) ENTRY( \name )
UNWIND( .fnstart ) UNWIND( .fnstart )
ands ip, r1, #3 ands ip, r1, #3
strneb r1, [ip] @ assert word-aligned strbne r1, [ip] @ assert word-aligned
mov r2, #1 mov r2, #1
and r3, r0, #31 @ Get bit offset and r3, r0, #31 @ Get bit offset
mov r0, r0, lsr #5 mov r0, r0, lsr #5
@ -62,7 +62,7 @@ ENDPROC(\name )
ENTRY( \name ) ENTRY( \name )
UNWIND( .fnstart ) UNWIND( .fnstart )
ands ip, r1, #3 ands ip, r1, #3
strneb r1, [ip] @ assert word-aligned strbne r1, [ip] @ assert word-aligned
and r2, r0, #31 and r2, r0, #31
mov r0, r0, lsr #5 mov r0, r0, lsr #5
mov r3, #1 mov r3, #1
@ -89,7 +89,7 @@ ENDPROC(\name )
ENTRY( \name ) ENTRY( \name )
UNWIND( .fnstart ) UNWIND( .fnstart )
ands ip, r1, #3 ands ip, r1, #3
strneb r1, [ip] @ assert word-aligned strbne r1, [ip] @ assert word-aligned
and r3, r0, #31 and r3, r0, #31
mov r0, r0, lsr #5 mov r0, r0, lsr #5
save_and_disable_irqs ip save_and_disable_irqs ip

View File

@ -456,13 +456,13 @@ static void __init at91_pm_sram_init(void)
sram_pool = gen_pool_get(&pdev->dev, NULL); sram_pool = gen_pool_get(&pdev->dev, NULL);
if (!sram_pool) { if (!sram_pool) {
pr_warn("%s: sram pool unavailable!\n", __func__); pr_warn("%s: sram pool unavailable!\n", __func__);
return; goto out_put_device;
} }
sram_base = gen_pool_alloc(sram_pool, at91_pm_suspend_in_sram_sz); sram_base = gen_pool_alloc(sram_pool, at91_pm_suspend_in_sram_sz);
if (!sram_base) { if (!sram_base) {
pr_warn("%s: unable to alloc sram!\n", __func__); pr_warn("%s: unable to alloc sram!\n", __func__);
return; goto out_put_device;
} }
sram_pbase = gen_pool_virt_to_phys(sram_pool, sram_base); sram_pbase = gen_pool_virt_to_phys(sram_pool, sram_base);
@ -470,12 +470,17 @@ static void __init at91_pm_sram_init(void)
at91_pm_suspend_in_sram_sz, false); at91_pm_suspend_in_sram_sz, false);
if (!at91_suspend_sram_fn) { if (!at91_suspend_sram_fn) {
pr_warn("SRAM: Could not map\n"); pr_warn("SRAM: Could not map\n");
return; goto out_put_device;
} }
/* Copy the pm suspend handler to SRAM */ /* Copy the pm suspend handler to SRAM */
at91_suspend_sram_fn = fncpy(at91_suspend_sram_fn, at91_suspend_sram_fn = fncpy(at91_suspend_sram_fn,
&at91_pm_suspend_in_sram, at91_pm_suspend_in_sram_sz); &at91_pm_suspend_in_sram, at91_pm_suspend_in_sram_sz);
return;
out_put_device:
put_device(&pdev->dev);
return;
} }
static void __init at91_pm_backup_init(void) static void __init at91_pm_backup_init(void)

View File

@ -301,14 +301,14 @@ static int __init imx_suspend_alloc_ocram(
if (!ocram_pool) { if (!ocram_pool) {
pr_warn("%s: ocram pool unavailable!\n", __func__); pr_warn("%s: ocram pool unavailable!\n", __func__);
ret = -ENODEV; ret = -ENODEV;
goto put_node; goto put_device;
} }
ocram_base = gen_pool_alloc(ocram_pool, size); ocram_base = gen_pool_alloc(ocram_pool, size);
if (!ocram_base) { if (!ocram_base) {
pr_warn("%s: unable to alloc ocram!\n", __func__); pr_warn("%s: unable to alloc ocram!\n", __func__);
ret = -ENOMEM; ret = -ENOMEM;
goto put_node; goto put_device;
} }
phys = gen_pool_virt_to_phys(ocram_pool, ocram_base); phys = gen_pool_virt_to_phys(ocram_pool, ocram_base);
@ -318,6 +318,8 @@ static int __init imx_suspend_alloc_ocram(
if (virt_out) if (virt_out)
*virt_out = virt; *virt_out = virt;
put_device:
put_device(&pdev->dev);
put_node: put_node:
of_node_put(node); of_node_put(node);

View File

@ -483,14 +483,14 @@ static int __init imx6q_suspend_init(const struct imx6_pm_socdata *socdata)
if (!ocram_pool) { if (!ocram_pool) {
pr_warn("%s: ocram pool unavailable!\n", __func__); pr_warn("%s: ocram pool unavailable!\n", __func__);
ret = -ENODEV; ret = -ENODEV;
goto put_node; goto put_device;
} }
ocram_base = gen_pool_alloc(ocram_pool, MX6Q_SUSPEND_OCRAM_SIZE); ocram_base = gen_pool_alloc(ocram_pool, MX6Q_SUSPEND_OCRAM_SIZE);
if (!ocram_base) { if (!ocram_base) {
pr_warn("%s: unable to alloc ocram!\n", __func__); pr_warn("%s: unable to alloc ocram!\n", __func__);
ret = -ENOMEM; ret = -ENOMEM;
goto put_node; goto put_device;
} }
ocram_pbase = gen_pool_virt_to_phys(ocram_pool, ocram_base); ocram_pbase = gen_pool_virt_to_phys(ocram_pool, ocram_base);
@ -513,7 +513,7 @@ static int __init imx6q_suspend_init(const struct imx6_pm_socdata *socdata)
ret = imx6_pm_get_base(&pm_info->mmdc_base, socdata->mmdc_compat); ret = imx6_pm_get_base(&pm_info->mmdc_base, socdata->mmdc_compat);
if (ret) { if (ret) {
pr_warn("%s: failed to get mmdc base %d!\n", __func__, ret); pr_warn("%s: failed to get mmdc base %d!\n", __func__, ret);
goto put_node; goto put_device;
} }
ret = imx6_pm_get_base(&pm_info->src_base, socdata->src_compat); ret = imx6_pm_get_base(&pm_info->src_base, socdata->src_compat);
@ -560,7 +560,7 @@ static int __init imx6q_suspend_init(const struct imx6_pm_socdata *socdata)
&imx6_suspend, &imx6_suspend,
MX6Q_SUSPEND_OCRAM_SIZE - sizeof(*pm_info)); MX6Q_SUSPEND_OCRAM_SIZE - sizeof(*pm_info));
goto put_node; goto put_device;
pl310_cache_map_failed: pl310_cache_map_failed:
iounmap(pm_info->gpc_base.vbase); iounmap(pm_info->gpc_base.vbase);
@ -570,6 +570,8 @@ iomuxc_map_failed:
iounmap(pm_info->src_base.vbase); iounmap(pm_info->src_base.vbase);
src_map_failed: src_map_failed:
iounmap(pm_info->mmdc_base.vbase); iounmap(pm_info->mmdc_base.vbase);
put_device:
put_device(&pdev->dev);
put_node: put_node:
of_node_put(node); of_node_put(node);

View File

@ -3,6 +3,8 @@ menuconfig ARCH_INTEGRATOR
depends on ARCH_MULTI_V4T || ARCH_MULTI_V5 || ARCH_MULTI_V6 depends on ARCH_MULTI_V4T || ARCH_MULTI_V5 || ARCH_MULTI_V6
select ARM_AMBA select ARM_AMBA
select COMMON_CLK_VERSATILE select COMMON_CLK_VERSATILE
select CMA
select DMA_CMA
select HAVE_TCM select HAVE_TCM
select ICST select ICST
select MFD_SYSCON select MFD_SYSCON
@ -34,14 +36,13 @@ config INTEGRATOR_IMPD1
select ARM_VIC select ARM_VIC
select GPIO_PL061 select GPIO_PL061
select GPIOLIB select GPIOLIB
select REGULATOR
select REGULATOR_FIXED_VOLTAGE
help help
The IM-PD1 is an add-on logic module for the Integrator which The IM-PD1 is an add-on logic module for the Integrator which
allows ARM(R) Ltd PrimeCells to be developed and evaluated. allows ARM(R) Ltd PrimeCells to be developed and evaluated.
The IM-PD1 can be found on the Integrator/PP2 platform. The IM-PD1 can be found on the Integrator/PP2 platform.
To compile this driver as a module, choose M here: the
module will be called impd1.
config INTEGRATOR_CM7TDMI config INTEGRATOR_CM7TDMI
bool "Integrator/CM7TDMI core module" bool "Integrator/CM7TDMI core module"
depends on ARCH_INTEGRATOR_AP depends on ARCH_INTEGRATOR_AP

View File

@ -60,14 +60,14 @@ static int socfpga_setup_ocram_self_refresh(void)
if (!ocram_pool) { if (!ocram_pool) {
pr_warn("%s: ocram pool unavailable!\n", __func__); pr_warn("%s: ocram pool unavailable!\n", __func__);
ret = -ENODEV; ret = -ENODEV;
goto put_node; goto put_device;
} }
ocram_base = gen_pool_alloc(ocram_pool, socfpga_sdram_self_refresh_sz); ocram_base = gen_pool_alloc(ocram_pool, socfpga_sdram_self_refresh_sz);
if (!ocram_base) { if (!ocram_base) {
pr_warn("%s: unable to alloc ocram!\n", __func__); pr_warn("%s: unable to alloc ocram!\n", __func__);
ret = -ENOMEM; ret = -ENOMEM;
goto put_node; goto put_device;
} }
ocram_pbase = gen_pool_virt_to_phys(ocram_pool, ocram_base); ocram_pbase = gen_pool_virt_to_phys(ocram_pool, ocram_base);
@ -78,7 +78,7 @@ static int socfpga_setup_ocram_self_refresh(void)
if (!suspend_ocram_base) { if (!suspend_ocram_base) {
pr_warn("%s: __arm_ioremap_exec failed!\n", __func__); pr_warn("%s: __arm_ioremap_exec failed!\n", __func__);
ret = -ENOMEM; ret = -ENOMEM;
goto put_node; goto put_device;
} }
/* Copy the code that puts DDR in self refresh to ocram */ /* Copy the code that puts DDR in self refresh to ocram */
@ -92,6 +92,8 @@ static int socfpga_setup_ocram_self_refresh(void)
if (!socfpga_sdram_self_refresh_in_ocram) if (!socfpga_sdram_self_refresh_in_ocram)
ret = -EFAULT; ret = -EFAULT;
put_device:
put_device(&pdev->dev);
put_node: put_node:
of_node_put(np); of_node_put(np);

View File

@ -108,8 +108,8 @@ static const char * const tegra_dt_board_compat[] = {
}; };
DT_MACHINE_START(TEGRA_DT, "NVIDIA Tegra SoC (Flattened Device Tree)") DT_MACHINE_START(TEGRA_DT, "NVIDIA Tegra SoC (Flattened Device Tree)")
.l2c_aux_val = 0x3c400001, .l2c_aux_val = 0x3c400000,
.l2c_aux_mask = 0xc20fc3fe, .l2c_aux_mask = 0xc20fc3ff,
.smp = smp_ops(tegra_smp_ops), .smp = smp_ops(tegra_smp_ops),
.map_io = tegra_map_common_io, .map_io = tegra_map_common_io,
.init_early = tegra_init_early, .init_early = tegra_init_early,

View File

@ -5,6 +5,7 @@
* VMA_VM_FLAGS * VMA_VM_FLAGS
* VM_EXEC * VM_EXEC
*/ */
#include <linux/const.h>
#include <asm/asm-offsets.h> #include <asm/asm-offsets.h>
#include <asm/thread_info.h> #include <asm/thread_info.h>
@ -30,7 +31,7 @@
* act_mm - get current->active_mm * act_mm - get current->active_mm
*/ */
.macro act_mm, rd .macro act_mm, rd
bic \rd, sp, #8128 bic \rd, sp, #(THREAD_SIZE - 1) & ~63
bic \rd, \rd, #63 bic \rd, \rd, #63
ldr \rd, [\rd, #TI_TASK] ldr \rd, [\rd, #TI_TASK]
.if (TSK_ACTIVE_MM > IMM12_MASK) .if (TSK_ACTIVE_MM > IMM12_MASK)

View File

@ -245,6 +245,11 @@
}; };
}; };
&hwrng {
clocks = <&clkc CLKID_RNG0>;
clock-names = "core";
};
&i2c_A { &i2c_A {
clocks = <&clkc CLKID_I2C>; clocks = <&clkc CLKID_I2C>;
}; };

View File

@ -745,7 +745,7 @@
}; };
qspi: spi@66470200 { qspi: spi@66470200 {
compatible = "brcm,spi-bcm-qspi", "brcm,spi-ns2-qspi"; compatible = "brcm,spi-ns2-qspi", "brcm,spi-bcm-qspi";
reg = <0x66470200 0x184>, reg = <0x66470200 0x184>,
<0x66470000 0x124>, <0x66470000 0x124>,
<0x67017408 0x004>, <0x67017408 0x004>,

View File

@ -155,6 +155,7 @@
regulator-min-microvolt = <700000>; regulator-min-microvolt = <700000>;
regulator-max-microvolt = <1150000>; regulator-max-microvolt = <1150000>;
regulator-enable-ramp-delay = <125>; regulator-enable-ramp-delay = <125>;
regulator-always-on;
}; };
ldo8_reg: LDO8 { ldo8_reg: LDO8 {

View File

@ -210,6 +210,17 @@
status = "ok"; status = "ok";
compatible = "adi,adv7533"; compatible = "adi,adv7533";
reg = <0x39>; reg = <0x39>;
adi,dsi-lanes = <4>;
ports {
#address-cells = <1>;
#size-cells = <0>;
port@0 {
reg = <0>;
};
port@1 {
reg = <1>;
};
};
}; };
}; };

View File

@ -513,7 +513,7 @@
reg = <0x39>; reg = <0x39>;
interrupt-parent = <&gpio1>; interrupt-parent = <&gpio1>;
interrupts = <1 2>; interrupts = <1 2>;
pd-gpio = <&gpio0 4 0>; pd-gpios = <&gpio0 4 0>;
adi,dsi-lanes = <4>; adi,dsi-lanes = <4>;
#sound-dai-cells = <0>; #sound-dai-cells = <0>;

View File

@ -542,7 +542,7 @@
pins = "gpio63", "gpio64", "gpio65", "gpio66", pins = "gpio63", "gpio64", "gpio65", "gpio66",
"gpio67", "gpio68"; "gpio67", "gpio68";
drive-strength = <8>; drive-strength = <8>;
bias-pull-none; bias-disable;
}; };
}; };
cdc_pdm_lines_sus: pdm_lines_off { cdc_pdm_lines_sus: pdm_lines_off {
@ -555,7 +555,7 @@
pins = "gpio63", "gpio64", "gpio65", "gpio66", pins = "gpio63", "gpio64", "gpio65", "gpio66",
"gpio67", "gpio68"; "gpio67", "gpio68";
drive-strength = <2>; drive-strength = <2>;
bias-disable; bias-pull-down;
}; };
}; };
}; };
@ -571,7 +571,7 @@
pins = "gpio113", "gpio114", "gpio115", pins = "gpio113", "gpio114", "gpio115",
"gpio116"; "gpio116";
drive-strength = <8>; drive-strength = <8>;
bias-pull-none; bias-disable;
}; };
}; };
@ -599,7 +599,7 @@
pinconf { pinconf {
pins = "gpio110"; pins = "gpio110";
drive-strength = <8>; drive-strength = <8>;
bias-pull-none; bias-disable;
}; };
}; };
@ -625,7 +625,7 @@
pinconf { pinconf {
pins = "gpio116"; pins = "gpio116";
drive-strength = <8>; drive-strength = <8>;
bias-pull-none; bias-disable;
}; };
}; };
ext_mclk_tlmm_lines_sus: mclk_lines_off { ext_mclk_tlmm_lines_sus: mclk_lines_off {
@ -653,7 +653,7 @@
pins = "gpio112", "gpio117", "gpio118", pins = "gpio112", "gpio117", "gpio118",
"gpio119"; "gpio119";
drive-strength = <8>; drive-strength = <8>;
bias-pull-none; bias-disable;
}; };
}; };
ext_sec_tlmm_lines_sus: tlmm_lines_off { ext_sec_tlmm_lines_sus: tlmm_lines_off {

View File

@@ -138,7 +138,7 @@
 vcc5v0_host: vcc5v0-host-regulator {
 compatible = "regulator-fixed";
-gpio = <&gpio4 RK_PA3 GPIO_ACTIVE_HIGH>;
+gpio = <&gpio4 RK_PA3 GPIO_ACTIVE_LOW>;
 enable-active-low;
 pinctrl-names = "default";
 pinctrl-0 = <&vcc5v0_host_en>;
@@ -193,7 +193,7 @@
 phy-mode = "rgmii";
 pinctrl-names = "default";
 pinctrl-0 = <&rgmii_pins>;
-snps,reset-gpio = <&gpio3 RK_PC0 GPIO_ACTIVE_HIGH>;
+snps,reset-gpio = <&gpio3 RK_PC0 GPIO_ACTIVE_LOW>;
 snps,reset-active-low;
 snps,reset-delays-us = <0 10000 50000>;
 tx_delay = <0x10>;

View File

@@ -1691,10 +1691,10 @@
 gpu: gpu@ff9a0000 {
 compatible = "rockchip,rk3399-mali", "arm,mali-t860";
 reg = <0x0 0xff9a0000 0x0 0x10000>;
-interrupts = <GIC_SPI 19 IRQ_TYPE_LEVEL_HIGH 0>,
-<GIC_SPI 20 IRQ_TYPE_LEVEL_HIGH 0>,
-<GIC_SPI 21 IRQ_TYPE_LEVEL_HIGH 0>;
-interrupt-names = "gpu", "job", "mmu";
+interrupts = <GIC_SPI 20 IRQ_TYPE_LEVEL_HIGH 0>,
+<GIC_SPI 21 IRQ_TYPE_LEVEL_HIGH 0>,
+<GIC_SPI 19 IRQ_TYPE_LEVEL_HIGH 0>;
+interrupt-names = "job", "mmu", "gpu";
 clocks = <&cru ACLK_GPU>;
 power-domains = <&power RK3399_PD_GPU>;
 status = "disabled";

View File

@@ -68,13 +68,13 @@ void apply_alternatives(void *start, size_t length);
 ".pushsection .altinstructions,\"a\"\n" \
 ALTINSTR_ENTRY(feature) \
 ".popsection\n" \
-".pushsection .altinstr_replacement, \"a\"\n" \
+".subsection 1\n" \
 "663:\n\t" \
 newinstr "\n" \
 "664:\n\t" \
-".popsection\n\t" \
 ".org . - (664b-663b) + (662b-661b)\n\t" \
-".org . - (662b-661b) + (664b-663b)\n" \
+".org . - (662b-661b) + (664b-663b)\n\t" \
+".previous\n" \
 ".endif\n"
 #define __ALTERNATIVE_CFG_CB(oldinstr, feature, cfg_enabled, cb) \
@@ -112,9 +112,9 @@ void apply_alternatives(void *start, size_t length);
 662: .pushsection .altinstructions, "a"
 altinstruction_entry 661b, 663f, \cap, 662b-661b, 664f-663f
 .popsection
-.pushsection .altinstr_replacement, "ax"
+.subsection 1
 663: \insn2
-664: .popsection
+664: .previous
 .org . - (664b-663b) + (662b-661b)
 .org . - (662b-661b) + (664b-663b)
 .endif
@@ -155,7 +155,7 @@ void apply_alternatives(void *start, size_t length);
 .pushsection .altinstructions, "a"
 altinstruction_entry 663f, 661f, \cap, 664f-663f, 662f-661f
 .popsection
-.pushsection .altinstr_replacement, "ax"
+.subsection 1
 .align 2 /* So GAS knows label 661 is suitably aligned */
 661:
 .endm
@@ -174,9 +174,9 @@ void apply_alternatives(void *start, size_t length);
 .macro alternative_else
 662:
 .if .Lasm_alt_mode==0
-.pushsection .altinstr_replacement, "ax"
+.subsection 1
 .else
-.popsection
+.previous
 .endif
 663:
 .endm
@@ -187,7 +187,7 @@ void apply_alternatives(void *start, size_t length);
 .macro alternative_endif
 664:
 .if .Lasm_alt_mode==0
-.popsection
+.previous
 .endif
 .org . - (664b-663b) + (662b-661b)
 .org . - (662b-661b) + (664b-663b)

View File

@@ -30,16 +30,17 @@ static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
 {
 __uint128_t tmp;
 u64 sum;
+int n = ihl; /* we want it signed */
 tmp = *(const __uint128_t *)iph;
 iph += 16;
-ihl -= 4;
+n -= 4;
 tmp += ((tmp >> 64) | (tmp << 64));
 sum = tmp >> 64;
 do {
 sum += *(const u32 *)iph;
 iph += 4;
-} while (--ihl);
+} while (--n > 0);
 sum += ((sum >> 32) | (sum << 32));
 return csum_fold((__force u32)(sum >> 32));

View File

@@ -119,6 +119,8 @@ void disable_debug_monitors(enum dbg_active_el el);
 void user_rewind_single_step(struct task_struct *task);
 void user_fastforward_single_step(struct task_struct *task);
+void user_regs_reset_single_step(struct user_pt_regs *regs,
+struct task_struct *task);
 void kernel_enable_single_step(struct pt_regs *regs);
 void kernel_disable_single_step(void);

View File

@@ -78,10 +78,11 @@
 * IMO: Override CPSR.I and enable signaling with VI
 * FMO: Override CPSR.F and enable signaling with VF
 * SWIO: Turn set/way invalidates into set/way clean+invalidate
+ * PTW: Take a stage2 fault if a stage1 walk steps in device memory
 */
 #define HCR_GUEST_FLAGS (HCR_TSC | HCR_TSW | HCR_TWE | HCR_TWI | HCR_VM | \
 HCR_TVM | HCR_BSU_IS | HCR_FB | HCR_TAC | \
-HCR_AMO | HCR_SWIO | HCR_TIDCP | HCR_RW)
+HCR_AMO | HCR_SWIO | HCR_TIDCP | HCR_RW | HCR_PTW)
 #define HCR_VIRT_EXCP_MASK (HCR_VSE | HCR_VI | HCR_VF)
 #define HCR_INT_OVERRIDE (HCR_FMO | HCR_IMO)
 #define HCR_HOST_NVHE_FLAGS (HCR_RW | HCR_API | HCR_APK)

View File

@@ -83,6 +83,34 @@ extern u32 __init_stage2_translation(void);
 *__hyp_this_cpu_ptr(sym); \
 })
+#define __KVM_EXTABLE(from, to) \
+" .pushsection __kvm_ex_table, \"a\"\n" \
+" .align 3\n" \
+" .long (" #from " - .), (" #to " - .)\n" \
+" .popsection\n"
+#define __kvm_at(at_op, addr) \
+( { \
+int __kvm_at_err = 0; \
+u64 spsr, elr; \
+asm volatile( \
+" mrs %1, spsr_el2\n" \
+" mrs %2, elr_el2\n" \
+"1: at "at_op", %3\n" \
+" isb\n" \
+" b 9f\n" \
+"2: msr spsr_el2, %1\n" \
+" msr elr_el2, %2\n" \
+" mov %w0, %4\n" \
+"9:\n" \
+__KVM_EXTABLE(1b, 2b) \
+: "+r" (__kvm_at_err), "=&r" (spsr), "=&r" (elr) \
+: "r" (addr), "i" (-EFAULT)); \
+__kvm_at_err; \
+} )
 #else /* __ASSEMBLY__ */
 .macro hyp_adr_this_cpu reg, sym, tmp
@@ -107,6 +135,21 @@ extern u32 __init_stage2_translation(void);
 kern_hyp_va \vcpu
 .endm
+/*
+ * KVM extable for unexpected exceptions.
+ * In the same format _asm_extable, but output to a different section so that
+ * it can be mapped to EL2. The KVM version is not sorted. The caller must
+ * ensure:
+ * x18 has the hypervisor value to allow any Shadow-Call-Stack instrumented
+ * code to write to it, and that SPSR_EL2 and ELR_EL2 are restored by the fixup.
+ */
+.macro _kvm_extable, from, to
+.pushsection __kvm_ex_table, "a"
+.align 3
+.long (\from - .), (\to - .)
+.popsection
+.endm
 #endif
 #endif /* __ARM_KVM_ASM_H__ */

View File

@@ -292,8 +292,10 @@ struct kvm_vcpu_arch {
 * CP14 and CP15 live in the same array, as they are backed by the
 * same system registers.
 */
-#define vcpu_cp14(v,r) ((v)->arch.ctxt.copro[(r)])
-#define vcpu_cp15(v,r) ((v)->arch.ctxt.copro[(r)])
+#define CPx_BIAS IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)
+#define vcpu_cp14(v,r) ((v)->arch.ctxt.copro[(r) ^ CPx_BIAS])
+#define vcpu_cp15(v,r) ((v)->arch.ctxt.copro[(r) ^ CPx_BIAS])
 #ifdef CONFIG_CPU_BIG_ENDIAN
 #define vcpu_cp15_64_high(v,r) vcpu_cp15((v),(r))

View File

@@ -65,7 +65,7 @@
 #define PAGE_HYP __pgprot(_HYP_PAGE_DEFAULT | PTE_HYP | PTE_HYP_XN)
 #define PAGE_HYP_EXEC __pgprot(_HYP_PAGE_DEFAULT | PTE_HYP | PTE_RDONLY)
 #define PAGE_HYP_RO __pgprot(_HYP_PAGE_DEFAULT | PTE_HYP | PTE_RDONLY | PTE_HYP_XN)
-#define PAGE_HYP_DEVICE __pgprot(PROT_DEVICE_nGnRE | PTE_HYP)
+#define PAGE_HYP_DEVICE __pgprot(_PROT_DEFAULT | PTE_ATTRINDX(MT_DEVICE_nGnRE) | PTE_HYP | PTE_HYP_XN)
 #define PAGE_S2 __pgprot(_PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_NORMAL) | PTE_S2_RDONLY)
 #define PAGE_S2_DEVICE __pgprot(_PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_DEVICE_nGnRE) | PTE_S2_RDONLY | PTE_UXN)

View File

@@ -44,20 +44,8 @@ struct alt_region {
 */
 static bool branch_insn_requires_update(struct alt_instr *alt, unsigned long pc)
 {
-unsigned long replptr;
-if (kernel_text_address(pc))
-return 1;
-replptr = (unsigned long)ALT_REPL_PTR(alt);
-if (pc >= replptr && pc <= (replptr + alt->alt_len))
-return 0;
-/*
-* Branching into *another* alternate sequence is doomed, and
-* we're not even trying to fix it up.
-*/
-BUG();
+unsigned long replptr = (unsigned long)ALT_REPL_PTR(alt);
+return !(pc >= replptr && pc <= (replptr + alt->alt_len));
 }
 #define align_down(x, a) ((unsigned long)(x) & ~(((unsigned long)(a)) - 1))

View File

@@ -136,11 +136,10 @@ static const struct arm64_ftr_bits ftr_id_aa64pfr0[] = {
 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_GIC_SHIFT, 4, 0),
 S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_ASIMD_SHIFT, 4, ID_AA64PFR0_ASIMD_NI),
 S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_FP_SHIFT, 4, ID_AA64PFR0_FP_NI),
-/* Linux doesn't care about the EL3 */
 ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL3_SHIFT, 4, 0),
-ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL2_SHIFT, 4, 0),
-ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_SHIFT, 4, ID_AA64PFR0_EL1_64BIT_ONLY),
-ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL0_SHIFT, 4, ID_AA64PFR0_EL0_64BIT_ONLY),
+ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL2_SHIFT, 4, 0),
+ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_SHIFT, 4, ID_AA64PFR0_EL1_64BIT_ONLY),
+ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL0_SHIFT, 4, ID_AA64PFR0_EL0_64BIT_ONLY),
 ARM64_FTR_END,
 };
@@ -273,7 +272,7 @@ static const struct arm64_ftr_bits ftr_id_pfr0[] = {
 };
 static const struct arm64_ftr_bits ftr_id_dfr0[] = {
+ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 28, 4, 0), /* [31:28] TraceFilt */
 S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 24, 4, 0xf), /* PerfMon */
 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 20, 4, 0),
 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 16, 4, 0),
@@ -627,9 +626,6 @@ void update_cpu_features(int cpu,
 taint |= check_update_ftr_reg(SYS_ID_AA64MMFR2_EL1, cpu,
 info->reg_id_aa64mmfr2, boot->reg_id_aa64mmfr2);
-/*
-* EL3 is not our concern.
-*/
 taint |= check_update_ftr_reg(SYS_ID_AA64PFR0_EL1, cpu,
 info->reg_id_aa64pfr0, boot->reg_id_aa64pfr0);
 taint |= check_update_ftr_reg(SYS_ID_AA64PFR1_EL1, cpu,

View File

@@ -150,17 +150,20 @@ postcore_initcall(debug_monitors_init);
 /*
 * Single step API and exception handling.
 */
-static void set_regs_spsr_ss(struct pt_regs *regs)
+static void set_user_regs_spsr_ss(struct user_pt_regs *regs)
 {
 regs->pstate |= DBG_SPSR_SS;
 }
-NOKPROBE_SYMBOL(set_regs_spsr_ss);
+NOKPROBE_SYMBOL(set_user_regs_spsr_ss);
-static void clear_regs_spsr_ss(struct pt_regs *regs)
+static void clear_user_regs_spsr_ss(struct user_pt_regs *regs)
 {
 regs->pstate &= ~DBG_SPSR_SS;
 }
-NOKPROBE_SYMBOL(clear_regs_spsr_ss);
+NOKPROBE_SYMBOL(clear_user_regs_spsr_ss);
+#define set_regs_spsr_ss(r) set_user_regs_spsr_ss(&(r)->user_regs)
+#define clear_regs_spsr_ss(r) clear_user_regs_spsr_ss(&(r)->user_regs)
 /* EL1 Single Step Handler hooks */
 static LIST_HEAD(step_hook);
@@ -386,17 +389,26 @@ void user_rewind_single_step(struct task_struct *task)
 * If single step is active for this thread, then set SPSR.SS
 * to 1 to avoid returning to the active-pending state.
 */
-if (test_ti_thread_flag(task_thread_info(task), TIF_SINGLESTEP))
+if (test_tsk_thread_flag(task, TIF_SINGLESTEP))
 set_regs_spsr_ss(task_pt_regs(task));
 }
 NOKPROBE_SYMBOL(user_rewind_single_step);
 void user_fastforward_single_step(struct task_struct *task)
 {
-if (test_ti_thread_flag(task_thread_info(task), TIF_SINGLESTEP))
+if (test_tsk_thread_flag(task, TIF_SINGLESTEP))
 clear_regs_spsr_ss(task_pt_regs(task));
 }
+void user_regs_reset_single_step(struct user_pt_regs *regs,
+struct task_struct *task)
+{
+if (test_tsk_thread_flag(task, TIF_SINGLESTEP))
+set_user_regs_spsr_ss(regs);
+else
+clear_user_regs_spsr_ss(regs);
+}
 /* Kernel API */
 void kernel_enable_single_step(struct pt_regs *regs)
 {

View File

@@ -738,6 +738,27 @@ static u64 get_distance_from_watchpoint(unsigned long addr, u64 val,
 return 0;
 }
+static int watchpoint_report(struct perf_event *wp, unsigned long addr,
+struct pt_regs *regs)
+{
+int step = is_default_overflow_handler(wp);
+struct arch_hw_breakpoint *info = counter_arch_bp(wp);
+info->trigger = addr;
+/*
+ * If we triggered a user watchpoint from a uaccess routine, then
+ * handle the stepping ourselves since userspace really can't help
+ * us with this.
+ */
+if (!user_mode(regs) && info->ctrl.privilege == AARCH64_BREAKPOINT_EL0)
+step = 1;
+else
+perf_bp_event(wp, regs);
+return step;
+}
 static int watchpoint_handler(unsigned long addr, unsigned int esr,
 struct pt_regs *regs)
 {
@@ -747,7 +768,6 @@ static int watchpoint_handler(unsigned long addr, unsigned int esr,
 u64 val;
 struct perf_event *wp, **slots;
 struct debug_info *debug_info;
-struct arch_hw_breakpoint *info;
 struct arch_hw_breakpoint_ctrl ctrl;
 slots = this_cpu_ptr(wp_on_reg);
@@ -785,25 +805,13 @@ static int watchpoint_handler(unsigned long addr, unsigned int esr,
 if (dist != 0)
 continue;
-info = counter_arch_bp(wp);
-info->trigger = addr;
-perf_bp_event(wp, regs);
-/* Do we need to handle the stepping? */
-if (is_default_overflow_handler(wp))
-step = 1;
+step = watchpoint_report(wp, addr, regs);
 }
-if (min_dist > 0 && min_dist != -1) {
-/* No exact match found. */
-wp = slots[closest_match];
-info = counter_arch_bp(wp);
-info->trigger = addr;
-perf_bp_event(wp, regs);
-/* Do we need to handle the stepping? */
-if (is_default_overflow_handler(wp))
-step = 1;
-}
+/* No exact match found? */
+if (min_dist > 0 && min_dist != -1)
+step = watchpoint_report(slots[closest_match], addr, regs);
 rcu_read_unlock();
 if (!step)

View File

@@ -258,7 +258,7 @@ static int kgdb_step_brk_fn(struct pt_regs *regs, unsigned int esr)
 if (user_mode(regs) || !kgdb_single_step)
 return DBG_HOOK_ERROR;
-kgdb_handle_exception(1, SIGTRAP, 0, regs);
+kgdb_handle_exception(0, SIGTRAP, 0, regs);
 return DBG_HOOK_HANDLED;
 }
 NOKPROBE_SYMBOL(kgdb_step_brk_fn);

View File

@@ -15,15 +15,34 @@ u64 perf_reg_value(struct pt_regs *regs, int idx)
 return 0;
 /*
-* Compat (i.e. 32 bit) mode:
-* - PC has been set in the pt_regs struct in kernel_entry,
-* - Handle SP and LR here.
+* Our handling of compat tasks (PERF_SAMPLE_REGS_ABI_32) is weird, but
+* we're stuck with it for ABI compatability reasons.
+*
+* For a 32-bit consumer inspecting a 32-bit task, then it will look at
+* the first 16 registers (see arch/arm/include/uapi/asm/perf_regs.h).
+* These correspond directly to a prefix of the registers saved in our
+* 'struct pt_regs', with the exception of the PC, so we copy that down
+* (x15 corresponds to SP_hyp in the architecture).
+*
+* So far, so good.
+*
+* The oddity arises when a 64-bit consumer looks at a 32-bit task and
+* asks for registers beyond PERF_REG_ARM_MAX. In this case, we return
+* SP_usr, LR_usr and PC in the positions where the AArch64 SP, LR and
+* PC registers would normally live. The initial idea was to allow a
+* 64-bit unwinder to unwind a 32-bit task and, although it's not clear
+* how well that works in practice, somebody might be relying on it.
+*
+* At the time we make a sample, we don't know whether the consumer is
+* 32-bit or 64-bit, so we have to cater for both possibilities.
 */
 if (compat_user_mode(regs)) {
 if ((u32)idx == PERF_REG_ARM64_SP)
 return regs->compat_sp;
 if ((u32)idx == PERF_REG_ARM64_LR)
 return regs->compat_lr;
+if (idx == 15)
+return regs->pc;
 }
 if ((u32)idx == PERF_REG_ARM64_SP)

View File

@@ -1496,8 +1496,8 @@ static int valid_native_regs(struct user_pt_regs *regs)
 */
 int valid_user_regs(struct user_pt_regs *regs, struct task_struct *task)
 {
-if (!test_tsk_thread_flag(task, TIF_SINGLESTEP))
-regs->pstate &= ~DBG_SPSR_SS;
+/* https://lore.kernel.org/lkml/20191118131525.GA4180@willie-the-truck */
+user_regs_reset_single_step(regs, task);
 if (is_compat_thread(task_thread_info(task)))
 return valid_compat_regs(regs);

View File

@@ -24,6 +24,13 @@ ENTRY(_text)
 jiffies = jiffies_64;
+#define HYPERVISOR_EXTABLE \
+. = ALIGN(SZ_8); \
+VMLINUX_SYMBOL(__start___kvm_ex_table) = .; \
+*(__kvm_ex_table) \
+VMLINUX_SYMBOL(__stop___kvm_ex_table) = .;
 #define HYPERVISOR_TEXT \
 /* \
 * Align to 4 KB so that \
@@ -39,6 +46,7 @@ jiffies = jiffies_64;
 VMLINUX_SYMBOL(__hyp_idmap_text_end) = .; \
 VMLINUX_SYMBOL(__hyp_text_start) = .; \
 *(.hyp.text) \
+HYPERVISOR_EXTABLE \
 VMLINUX_SYMBOL(__hyp_text_end) = .;
 #define IDMAP_TEXT \
@@ -154,9 +162,6 @@ SECTIONS
 *(.altinstructions)
 __alt_instructions_end = .;
 }
-.altinstr_replacement : {
-*(.altinstr_replacement)
-}
 . = ALIGN(PAGE_SIZE);
 __inittext_end = .;

View File

@@ -147,11 +147,15 @@ ENTRY(__kvm_handle_stub_hvc)
 1: cmp x0, #HVC_RESET_VECTORS
 b.ne 1f
-reset:
 /*
-* Reset kvm back to the hyp stub. Do not clobber x0-x4 in
-* case we coming via HVC_SOFT_RESTART.
+* Set the HVC_RESET_VECTORS return code before entering the common
+* path so that we do not clobber x0-x2 in case we are coming via
+* HVC_SOFT_RESTART.
 */
+mov x0, xzr
+reset:
+/* Reset kvm back to the hyp stub. */
 mrs x5, sctlr_el2
 ldr x6, =SCTLR_ELx_FLAGS
 bic x5, x5, x6 // Clear SCTL_M and etc
@@ -162,7 +166,6 @@ reset:
 /* Install stub vectors */
 adr_l x5, __hyp_stub_vectors
 msr vbar_el2, x5
-mov x0, xzr
 eret
 1: /* Bad stub call */

View File

@@ -17,6 +17,7 @@
 #include <linux/linkage.h>
+#include <asm/alternative.h>
 #include <asm/asm-offsets.h>
 #include <asm/assembler.h>
 #include <asm/fpsimdmacros.h>
@@ -62,6 +63,15 @@ ENTRY(__guest_enter)
 // Store the host regs
 save_callee_saved_regs x1
+// Now the host state is stored if we have a pending RAS SError it must
+// affect the host. If any asynchronous exception is pending we defer
+// the guest entry.
+mrs x1, isr_el1
+cbz x1, 1f
+mov x0, #ARM_EXCEPTION_IRQ
+ret
+1:
 add x18, x0, #VCPU_CONTEXT
 // Restore guest regs x0-x17
@@ -135,18 +145,22 @@ ENTRY(__guest_exit)
 // This is our single instruction exception window. A pending
 // SError is guaranteed to occur at the earliest when we unmask
 // it, and at the latest just after the ISB.
-.global abort_guest_exit_start
 abort_guest_exit_start:
 isb
-.global abort_guest_exit_end
 abort_guest_exit_end:
+msr daifset, #4 // Mask aborts
+ret
+_kvm_extable abort_guest_exit_start, 9997f
+_kvm_extable abort_guest_exit_end, 9997f
+9997:
+msr daifset, #4 // Mask aborts
+mov x0, #(1 << ARM_EXIT_WITH_SERROR_BIT)
-// If the exception took place, restore the EL1 exception
-// context so that we can report some information.
-// Merge the exception code with the SError pending bit.
-tbz x0, #ARM_EXIT_WITH_SERROR_BIT, 1f
+// restore the EL1 exception context so that we can report some
+// information. Merge the exception code with the SError pending bit.
 msr elr_el2, x2
 msr esr_el2, x3
 msr spsr_el2, x4

View File

@@ -25,6 +25,30 @@
 #include <asm/kvm_asm.h>
 #include <asm/kvm_mmu.h>
+.macro save_caller_saved_regs_vect
+stp x0, x1, [sp, #-16]!
+stp x2, x3, [sp, #-16]!
+stp x4, x5, [sp, #-16]!
+stp x6, x7, [sp, #-16]!
+stp x8, x9, [sp, #-16]!
+stp x10, x11, [sp, #-16]!
+stp x12, x13, [sp, #-16]!
+stp x14, x15, [sp, #-16]!
+stp x16, x17, [sp, #-16]!
+.endm
+.macro restore_caller_saved_regs_vect
+ldp x16, x17, [sp], #16
+ldp x14, x15, [sp], #16
+ldp x12, x13, [sp], #16
+ldp x10, x11, [sp], #16
+ldp x8, x9, [sp], #16
+ldp x6, x7, [sp], #16
+ldp x4, x5, [sp], #16
+ldp x2, x3, [sp], #16
+ldp x0, x1, [sp], #16
+.endm
 .text
 .pushsection .hyp.text, "ax"
@@ -183,26 +207,24 @@ el1_error:
 mov x0, #ARM_EXCEPTION_EL1_SERROR
 b __guest_exit
+el2_sync:
+save_caller_saved_regs_vect
+stp x29, x30, [sp, #-16]!
+bl kvm_unexpected_el2_exception
+ldp x29, x30, [sp], #16
+restore_caller_saved_regs_vect
+eret
 el2_error:
-/*
-* Only two possibilities:
-* 1) Either we come from the exit path, having just unmasked
-* PSTATE.A: change the return code to an EL2 fault, and
-* carry on, as we're already in a sane state to handle it.
-* 2) Or we come from anywhere else, and that's a bug: we panic.
-*
-* For (1), x0 contains the original return code and x1 doesn't
-* contain anything meaningful at that stage. We can reuse them
-* as temp registers.
-* For (2), who cares?
-*/
-mrs x0, elr_el2
-adr x1, abort_guest_exit_start
-cmp x0, x1
-adr x1, abort_guest_exit_end
-ccmp x0, x1, #4, ne
-b.ne __hyp_panic
-mov x0, #(1 << ARM_EXIT_WITH_SERROR_BIT)
+save_caller_saved_regs_vect
+stp x29, x30, [sp, #-16]!
+bl kvm_unexpected_el2_exception
+ldp x29, x30, [sp], #16
+restore_caller_saved_regs_vect
 eret
 ENTRY(__hyp_do_panic)
@@ -231,7 +253,6 @@ ENDPROC(\label)
 invalid_vector el2t_irq_invalid
 invalid_vector el2t_fiq_invalid
 invalid_vector el2t_error_invalid
-invalid_vector el2h_sync_invalid
 invalid_vector el2h_irq_invalid
 invalid_vector el2h_fiq_invalid
 invalid_vector el1_sync_invalid
@@ -248,7 +269,7 @@ ENTRY(__kvm_hyp_vector)
 ventry el2t_fiq_invalid // FIQ EL2t
 ventry el2t_error_invalid // Error EL2t
-ventry el2h_sync_invalid // Synchronous EL2h
+ventry el2_sync // Synchronous EL2h
 ventry el2h_irq_invalid // IRQ EL2h
 ventry el2h_fiq_invalid // FIQ EL2h
 ventry el2_error // Error EL2h

View File

@@ -22,11 +22,15 @@
 #include <kvm/arm_psci.h>
+#include <asm/extable.h>
 #include <asm/kvm_asm.h>
 #include <asm/kvm_emulate.h>
 #include <asm/kvm_hyp.h>
 #include <asm/fpsimd.h>
+extern struct exception_table_entry __start___kvm_ex_table;
+extern struct exception_table_entry __stop___kvm_ex_table;
 static bool __hyp_text __fpsimd_enabled_nvhe(void)
 {
 return !(read_sysreg(cptr_el2) & CPTR_EL2_TFP);
@@ -216,10 +220,10 @@ static bool __hyp_text __translate_far_to_hpfar(u64 far, u64 *hpfar)
 * saved the guest context yet, and we may return early...
 */
 par = read_sysreg(par_el1);
-asm volatile("at s1e1r, %0" : : "r" (far));
-isb();
-tmp = read_sysreg(par_el1);
+if (!__kvm_at("s1e1r", far))
+tmp = read_sysreg(par_el1);
+else
+tmp = 1; /* back to the guest */
 write_sysreg(par, par_el1);
 if (unlikely(tmp & 1))
@@ -444,7 +448,7 @@ static void __hyp_text __hyp_call_panic_nvhe(u64 spsr, u64 elr, u64 par,
 * making sure it is a kernel address and not a PC-relative
 * reference.
 */
-asm volatile("ldr %0, =__hyp_panic_string" : "=r" (str_va));
+asm volatile("ldr %0, =%1" : "=r" (str_va) : "S" (__hyp_panic_string));
 __hyp_do_panic(str_va,
 spsr, elr,
@@ -486,3 +490,30 @@ void __hyp_text __noreturn hyp_panic(struct kvm_cpu_context *host_ctxt)
 unreachable();
 }
+asmlinkage void __hyp_text kvm_unexpected_el2_exception(void)
+{
+unsigned long addr, fixup;
+struct kvm_cpu_context *host_ctxt;
+struct exception_table_entry *entry, *end;
+unsigned long elr_el2 = read_sysreg(elr_el2);
+entry = hyp_symbol_addr(__start___kvm_ex_table);
+end = hyp_symbol_addr(__stop___kvm_ex_table);
+host_ctxt = __hyp_this_cpu_ptr(kvm_host_cpu_state);
+while (entry < end) {
+addr = (unsigned long)&entry->insn + entry->insn;
+fixup = (unsigned long)&entry->fixup + entry->fixup;
+if (addr != elr_el2) {
+entry++;
+continue;
+}
+write_sysreg(fixup, elr_el2);
+return;
+}
+hyp_panic(host_ctxt);
+}

View File

@@ -316,8 +316,10 @@ static int __init mcf_pci_init(void)
 /* Keep a virtual mapping to IO/config space active */
 iospace = (unsigned long) ioremap(PCI_IO_PA, PCI_IO_SIZE);
-if (iospace == 0)
+if (iospace == 0) {
+pci_free_host_bridge(bridge);
 return -ENODEV;
+}
 pr_info("Coldfire: PCI IO/config window mapped to 0x%x\n",
 (u32) iospace);

View File

@@ -89,9 +89,9 @@
 * coherency though in all cases. And for copyback caches we will need
 * to push cached data as well.
 */
-#define CACHE_INIT CACR_CINVA
-#define CACHE_INVALIDATE CACR_CINVA
-#define CACHE_INVALIDATED CACR_CINVA
+#define CACHE_INIT (CACHE_MODE + CACR_CINVA - CACR_EC)
+#define CACHE_INVALIDATE (CACHE_MODE + CACR_CINVA)
+#define CACHE_INVALIDATED (CACHE_MODE + CACR_CINVA)
 #define ACR0_MODE ((CONFIG_RAMBASE & 0xff000000) + \
 (0x000f0000) + \

View File

@@ -257,6 +257,7 @@ extern int rbv_present,via_alt_mapping;
 struct irq_desc;
+extern void via_l2_flush(int writeback);
 extern void via_register_interrupts(void);
 extern void via_irq_enable(int);
 extern void via_irq_disable(int);

View File

@@ -61,7 +61,6 @@ extern void iop_preinit(void);
 extern void iop_init(void);
 extern void via_init(void);
 extern void via_init_clock(irq_handler_t func);
-extern void via_flush_cache(void);
 extern void oss_init(void);
 extern void psc_init(void);
 extern void baboon_init(void);
@@ -132,21 +131,6 @@ int __init mac_parse_bootinfo(const struct bi_record *record)
 return unknown;
 }
-/*
-* Flip into 24bit mode for an instant - flushes the L2 cache card. We
-* have to disable interrupts for this. Our IRQ handlers will crap
-* themselves if they take an IRQ in 24bit mode!
-*/
-static void mac_cache_card_flush(int writeback)
-{
-unsigned long flags;
-local_irq_save(flags);
-via_flush_cache();
-local_irq_restore(flags);
-}
 void __init config_mac(void)
 {
 if (!MACH_IS_MAC)
@@ -179,9 +163,8 @@ void __init config_mac(void)
 * not.
 */
-if (macintosh_config->ident == MAC_MODEL_IICI
-|| macintosh_config->ident == MAC_MODEL_IIFX)
-mach_l2_flush = mac_cache_card_flush;
+if (macintosh_config->ident == MAC_MODEL_IICI)
+mach_l2_flush = via_l2_flush;
 }

View File

@@ -183,7 +183,7 @@ static __inline__ void iop_writeb(volatile struct mac_iop *iop, __u16 addr, __u8
 static __inline__ void iop_stop(volatile struct mac_iop *iop)
 {
-iop->status_ctrl &= ~IOP_RUN;
+iop->status_ctrl = IOP_AUTOINC;
 }
 static __inline__ void iop_start(volatile struct mac_iop *iop)
@@ -191,14 +191,9 @@ static __inline__ void iop_start(volatile struct mac_iop *iop)
 iop->status_ctrl = IOP_RUN | IOP_AUTOINC;
 }
-static __inline__ void iop_bypass(volatile struct mac_iop *iop)
-{
-iop->status_ctrl |= IOP_BYPASS;
-}
 static __inline__ void iop_interrupt(volatile struct mac_iop *iop)
 {
-iop->status_ctrl |= IOP_IRQ;
+iop->status_ctrl = IOP_IRQ | IOP_RUN | IOP_AUTOINC;
 }
 static int iop_alive(volatile struct mac_iop *iop)
@@ -244,7 +239,6 @@ void __init iop_preinit(void)
 } else {
 iop_base[IOP_NUM_SCC] = (struct mac_iop *) SCC_IOP_BASE_QUADRA;
 }
-iop_base[IOP_NUM_SCC]->status_ctrl = 0x87;
 iop_scc_present = 1;
 } else {
 iop_base[IOP_NUM_SCC] = NULL;
@@ -256,7 +250,7 @@ void __init iop_preinit(void)
 } else {
 iop_base[IOP_NUM_ISM] = (struct mac_iop *) ISM_IOP_BASE_QUADRA;
 }
-iop_base[IOP_NUM_ISM]->status_ctrl = 0;
+iop_stop(iop_base[IOP_NUM_ISM]);
 iop_ism_present = 1;
 } else {
 iop_base[IOP_NUM_ISM] = NULL;
@@ -416,7 +410,8 @@ static void iop_handle_send(uint iop_num, uint chan)
 msg->status = IOP_MSGSTATUS_UNUSED;
 msg = msg->next;
 iop_send_queue[iop_num][chan] = msg;
-if (msg) iop_do_send(msg);
+if (msg && iop_readb(iop, IOP_ADDR_SEND_STATE + chan) == IOP_MSG_IDLE)
+iop_do_send(msg);
 }
 /*
@@ -490,16 +485,12 @@ int iop_send_message(uint iop_num, uint chan, void *privdata,
 if (!(q = iop_send_queue[iop_num][chan])) {
 iop_send_queue[iop_num][chan] = msg;
+iop_do_send(msg);
 } else {
 while (q->next) q = q->next;
 q->next = msg;
 }
-if (iop_readb(iop_base[iop_num],
-IOP_ADDR_SEND_STATE + chan) == IOP_MSG_IDLE) {
-iop_do_send(msg);
-}
 return 0;
 }

View File

@@ -300,10 +300,14 @@ void via_debug_dump(void)
 * the system into 24-bit mode for an instant.
 */
-void via_flush_cache(void)
+void via_l2_flush(int writeback)
 {
+unsigned long flags;
+local_irq_save(flags);
 via2[gBufB] &= ~VIA2B_vMode32;
 via2[gBufB] |= VIA2B_vMode32;
+local_irq_restore(flags);
 }
 /*

View File

@@ -303,6 +303,7 @@ static int q40_get_rtc_pll(struct rtc_pll_info *pll)
 {
 int tmp = Q40_RTC_CTRL;
+pll->pll_ctrl = 0;
 pll->pll_value = tmp & Q40_RTC_PLL_MASK;
 if (tmp & Q40_RTC_PLL_SIGN)
 pll->pll_value = -pll->pll_value;

View File

@@ -856,6 +856,7 @@ config SNI_RM
 select I8253
 select I8259
 select ISA
+select MIPS_L1_CACHE_SHIFT_6
 select SWAP_IO_SPACE if CPU_BIG_ENDIAN
 select SYS_HAS_CPU_R4X00
 select SYS_HAS_CPU_R5000

View File

@@ -279,12 +279,23 @@ ifdef CONFIG_64BIT
 endif
 endif
+# When linking a 32-bit executable the LLVM linker cannot cope with a
+# 32-bit load address that has been sign-extended to 64 bits. Simply
+# remove the upper 32 bits then, as it is safe to do so with other
+# linkers.
+ifdef CONFIG_64BIT
+load-ld = $(load-y)
+else
+load-ld = $(subst 0xffffffff,0x,$(load-y))
+endif
 KBUILD_AFLAGS += $(cflags-y)
 KBUILD_CFLAGS += $(cflags-y)
-KBUILD_CPPFLAGS += -DVMLINUX_LOAD_ADDRESS=$(load-y)
+KBUILD_CPPFLAGS += -DVMLINUX_LOAD_ADDRESS=$(load-y) -DLINKER_LOAD_ADDRESS=$(load-ld)
 KBUILD_CPPFLAGS += -DDATAOFFSET=$(if $(dataoffset-y),$(dataoffset-y),0)
 bootvars-y = VMLINUX_LOAD_ADDRESS=$(load-y) \
+LINKER_LOAD_ADDRESS=$(load-ld) \
 VMLINUX_ENTRY_ADDRESS=$(entry-y) \
 PLATFORM="$(platform-y)" \
 ITS_INPUTS="$(its-y)"

View File

@@ -87,7 +87,7 @@ ifneq ($(zload-y),)
 VMLINUZ_LOAD_ADDRESS := $(zload-y)
 else
 VMLINUZ_LOAD_ADDRESS = $(shell $(obj)/calc_vmlinuz_load_addr \
-$(obj)/vmlinux.bin $(VMLINUX_LOAD_ADDRESS))
+$(obj)/vmlinux.bin $(LINKER_LOAD_ADDRESS))
 endif
 UIMAGE_LOADADDR = $(VMLINUZ_LOAD_ADDRESS)

View File

@@ -517,6 +517,7 @@ static int __init dwc3_octeon_device_init(void)
 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 if (res == NULL) {
+put_device(&pdev->dev);
 dev_err(&pdev->dev, "No memory resources\n");
 return -ENXIO;
 }
@@ -528,8 +529,10 @@ static int __init dwc3_octeon_device_init(void)
 * know the difference.
 */
 base = devm_ioremap_resource(&pdev->dev, res);
-if (IS_ERR(base))
+if (IS_ERR(base)) {
+put_device(&pdev->dev);
 return PTR_ERR(base);
+}
 mutex_lock(&dwc3_octeon_clocks_mutex);
 dwc3_octeon_clocks_start(&pdev->dev, (u64)base);

View File

@@ -250,7 +250,7 @@ CONFIG_MEDIA_CAMERA_SUPPORT=y
 CONFIG_MEDIA_USB_SUPPORT=y
 CONFIG_USB_VIDEO_CLASS=m
 CONFIG_DRM=y
-CONFIG_DRM_RADEON=y
+CONFIG_DRM_RADEON=m
 CONFIG_FB_RADEON=y
 CONFIG_LCD_CLASS_DEVICE=y
 CONFIG_LCD_PLATFORM=m

View File

@@ -47,6 +47,7 @@ static inline int __pure __get_cpu_type(const int cpu_type)
 case CPU_34K:
 case CPU_1004K:
 case CPU_74K:
+case CPU_1074K:
 case CPU_M14KC:
 case CPU_M14KEC:
 case CPU_INTERAPTIV:

View File

@@ -274,8 +274,12 @@ enum emulation_result {
 #define MIPS3_PG_SHIFT 6
 #define MIPS3_PG_FRAME 0x3fffffc0
+#if defined(CONFIG_64BIT)
+#define VPN2_MASK GENMASK(cpu_vmbits - 1, 13)
+#else
 #define VPN2_MASK 0xffffe000
-#define KVM_ENTRYHI_ASID MIPS_ENTRYHI_ASID
+#endif
+#define KVM_ENTRYHI_ASID cpu_asid_mask(&boot_cpu_data)
 #define TLB_IS_GLOBAL(x) ((x).tlb_lo[0] & (x).tlb_lo[1] & ENTRYLO_G)
 #define TLB_VPN2(x) ((x).tlb_hi & VPN2_MASK)
 #define TLB_ASID(x) ((x).tlb_hi & KVM_ENTRYHI_ASID)

View File

@@ -737,7 +737,7 @@
 /* MAAR bit definitions */
 #define MIPS_MAAR_VH (_U64CAST_(1) << 63)
-#define MIPS_MAAR_ADDR ((BIT_ULL(BITS_PER_LONG - 12) - 1) << 12)
+#define MIPS_MAAR_ADDR GENMASK_ULL(55, 12)
 #define MIPS_MAAR_ADDR_SHIFT 12
 #define MIPS_MAAR_S (_ULCAST_(1) << 1)
 #define MIPS_MAAR_VL (_ULCAST_(1) << 0)

View File

@@ -431,20 +431,20 @@ NESTED(nmi_handler, PT_SIZE, sp)
 .endm
 .macro __build_clear_fpe
-CLI
-TRACE_IRQS_OFF
 .set push
 /* gas fails to assemble cfc1 for some archs (octeon).*/ \
 .set mips1
 SET_HARDFLOAT
 cfc1 a1, fcr31
 .set pop
+CLI
+TRACE_IRQS_OFF
 .endm
 .macro __build_clear_msa_fpe
+_cfcmsa a1, MSA_CSR
 CLI
 TRACE_IRQS_OFF
-_cfcmsa a1, MSA_CSR
 .endm
 .macro __build_clear_ade

View File

@@ -123,9 +123,9 @@ static char *cm2_causes[32] = {
 "COH_RD_ERR", "MMIO_WR_ERR", "MMIO_RD_ERR", "0x07",
 "0x08", "0x09", "0x0a", "0x0b",
 "0x0c", "0x0d", "0x0e", "0x0f",
-"0x10", "0x11", "0x12", "0x13",
-"0x14", "0x15", "0x16", "INTVN_WR_ERR",
-"INTVN_RD_ERR", "0x19", "0x1a", "0x1b",
+"0x10", "INTVN_WR_ERR", "INTVN_RD_ERR", "0x13",
+"0x14", "0x15", "0x16", "0x17",
+"0x18", "0x19", "0x1a", "0x1b",
 "0x1c", "0x1d", "0x1e", "0x1f"
 };

View File

@@ -911,7 +911,17 @@ static void __init arch_mem_init(char **cmdline_p)
 BOOTMEM_DEFAULT);
 #endif
 device_tree_init();
+/*
+ * In order to reduce the possibility of kernel panic when failed to
+ * get IO TLB memory under CONFIG_SWIOTLB, it is better to allocate
+ * low memory as small as possible before plat_swiotlb_setup(), so
+ * make sparse_init() using top-down allocation.
+ */
+memblock_set_bottom_up(false);
 sparse_init();
+memblock_set_bottom_up(true);
 plat_swiotlb_setup();
 dma_contiguous_reserve(PFN_PHYS(max_low_pfn));

View File

@@ -240,6 +240,8 @@ static int bmips_boot_secondary(int cpu, struct task_struct *idle)
 */
 static void bmips_init_secondary(void)
 {
+bmips_cpu_setup();
 switch (current_cpu_type()) {
 case CPU_BMIPS4350:
 case CPU_BMIPS4380:

View File

@@ -22,12 +22,77 @@
 #include <linux/smp.h>
 #include <linux/spinlock.h>
 #include <linux/export.h>
+#include <linux/cpufreq.h>
+#include <linux/delay.h>
 #include <asm/cpu-features.h>
 #include <asm/cpu-type.h>
 #include <asm/div64.h>
 #include <asm/time.h>
+#ifdef CONFIG_CPU_FREQ
+static DEFINE_PER_CPU(unsigned long, pcp_lpj_ref);
+static DEFINE_PER_CPU(unsigned long, pcp_lpj_ref_freq);
+static unsigned long glb_lpj_ref;
+static unsigned long glb_lpj_ref_freq;
+static int cpufreq_callback(struct notifier_block *nb,
+unsigned long val, void *data)
+{
+int cpu;
+struct cpufreq_freqs *freq = data;
+/*
+ * Skip lpj numbers adjustment if the CPU-freq transition is safe for
+ * the loops delay. (Is this possible?)
+ */
+if (freq->flags & CPUFREQ_CONST_LOOPS)
+return NOTIFY_OK;
+/* Save the initial values of the lpjes for future scaling. */
+if (!glb_lpj_ref) {
+glb_lpj_ref = boot_cpu_data.udelay_val;
+glb_lpj_ref_freq = freq->old;
+for_each_online_cpu(cpu) {
+per_cpu(pcp_lpj_ref, cpu) =
+cpu_data[cpu].udelay_val;
+per_cpu(pcp_lpj_ref_freq, cpu) = freq->old;
+}
+}
+cpu = freq->cpu;
+/*
+ * Adjust global lpj variable and per-CPU udelay_val number in
+ * accordance with the new CPU frequency.
+ */
+if ((val == CPUFREQ_PRECHANGE && freq->old < freq->new) ||
+(val == CPUFREQ_POSTCHANGE && freq->old > freq->new)) {
+loops_per_jiffy = cpufreq_scale(glb_lpj_ref,
+glb_lpj_ref_freq,
+freq->new);
+cpu_data[cpu].udelay_val = cpufreq_scale(per_cpu(pcp_lpj_ref, cpu),
+per_cpu(pcp_lpj_ref_freq, cpu), freq->new);
+}
+return NOTIFY_OK;
+}
+static struct notifier_block cpufreq_notifier = {
+.notifier_call = cpufreq_callback,
+};
+static int __init register_cpufreq_notifier(void)
+{
+return cpufreq_register_notifier(&cpufreq_notifier,
+CPUFREQ_TRANSITION_NOTIFIER);
+}
+core_initcall(register_cpufreq_notifier);
+#endif /* CONFIG_CPU_FREQ */
 /*
 * forward reference
 */

Some files were not shown because too many files have changed in this diff.