Merge branch 'locking/urgent' into locking/core, to pick up locking fixes

Signed-off-by: Ingo Molnar <mingo@kernel.org>
This commit is contained in:
Ingo Molnar 2025-03-08 00:54:06 +01:00
commit f23ecef20a
689 changed files with 8158 additions and 3954 deletions

View File

@ -88,7 +88,6 @@ Antonio Quartulli <antonio@mandelbit.com> <antonio@open-mesh.com>
Antonio Quartulli <antonio@mandelbit.com> <antonio.quartulli@open-mesh.com>
Antonio Quartulli <antonio@mandelbit.com> <ordex@autistici.org>
Antonio Quartulli <antonio@mandelbit.com> <ordex@ritirata.org>
Antonio Quartulli <antonio@mandelbit.com> <antonio@openvpn.net>
Antonio Quartulli <antonio@mandelbit.com> <a@unstable.cc>
Anup Patel <anup@brainfault.org> <anup.patel@wdc.com>
Archit Taneja <archit@ti.com>
@ -226,6 +225,7 @@ Fangrui Song <i@maskray.me> <maskray@google.com>
Felipe W Damasio <felipewd@terra.com.br>
Felix Kuhling <fxkuehl@gmx.de>
Felix Moeller <felix@derklecks.de>
Feng Tang <feng.79.tang@gmail.com> <feng.tang@intel.com>
Fenglin Wu <quic_fenglinw@quicinc.com> <fenglinw@codeaurora.org>
Filipe Lautert <filipe@icewall.org>
Finn Thain <fthain@linux-m68k.org> <fthain@telegraphics.com.au>
@ -317,6 +317,8 @@ Jayachandran C <c.jayachandran@gmail.com> <jnair@caviumnetworks.com>
Jean Tourrilhes <jt@hpl.hp.com>
Jeevan Shriram <quic_jshriram@quicinc.com> <jshriram@codeaurora.org>
Jeff Garzik <jgarzik@pretzel.yyz.us>
Jeff Johnson <jeff.johnson@oss.qualcomm.com> <jjohnson@codeaurora.org>
Jeff Johnson <jeff.johnson@oss.qualcomm.com> <quic_jjohnson@quicinc.com>
Jeff Layton <jlayton@kernel.org> <jlayton@poochiereds.net>
Jeff Layton <jlayton@kernel.org> <jlayton@primarydata.com>
Jeff Layton <jlayton@kernel.org> <jlayton@redhat.com>
@ -519,6 +521,7 @@ Nadav Amit <nadav.amit@gmail.com> <namit@cs.technion.ac.il>
Nadia Yvette Chambers <nyc@holomorphy.com> William Lee Irwin III <wli@holomorphy.com>
Naoya Horiguchi <nao.horiguchi@gmail.com> <n-horiguchi@ah.jp.nec.com>
Naoya Horiguchi <nao.horiguchi@gmail.com> <naoya.horiguchi@nec.com>
Natalie Vock <natalie.vock@gmx.de> <friedrich.vock@gmx.de>
Nathan Chancellor <nathan@kernel.org> <natechancellor@gmail.com>
Naveen N Rao <naveen@kernel.org> <naveen.n.rao@linux.ibm.com>
Naveen N Rao <naveen@kernel.org> <naveen.n.rao@linux.vnet.ibm.com>
@ -531,6 +534,7 @@ Nicholas Piggin <npiggin@gmail.com> <npiggin@kernel.dk>
Nicholas Piggin <npiggin@gmail.com> <npiggin@suse.de>
Nicholas Piggin <npiggin@gmail.com> <nickpiggin@yahoo.com.au>
Nicholas Piggin <npiggin@gmail.com> <piggin@cyberone.com.au>
Nick Desaulniers <nick.desaulniers+lkml@gmail.com> <ndesaulniers@google.com>
Nicolas Ferre <nicolas.ferre@microchip.com> <nicolas.ferre@atmel.com>
Nicolas Pitre <nico@fluxnic.net> <nicolas.pitre@linaro.org>
Nicolas Pitre <nico@fluxnic.net> <nico@linaro.org>
@ -609,6 +613,8 @@ Richard Leitner <richard.leitner@linux.dev> <me@g0hl1n.net>
Richard Leitner <richard.leitner@linux.dev> <richard.leitner@skidata.com>
Robert Foss <rfoss@kernel.org> <robert.foss@linaro.org>
Rocky Liao <quic_rjliao@quicinc.com> <rjliao@codeaurora.org>
Rodrigo Siqueira <siqueira@igalia.com> <rodrigosiqueiramelo@gmail.com>
Rodrigo Siqueira <siqueira@igalia.com> <Rodrigo.Siqueira@amd.com>
Roman Gushchin <roman.gushchin@linux.dev> <guro@fb.com>
Roman Gushchin <roman.gushchin@linux.dev> <guroan@gmail.com>
Roman Gushchin <roman.gushchin@linux.dev> <klamm@yandex-team.ru>

View File

@ -212,6 +212,17 @@ pid>/``).
This value defaults to 0.
core_sort_vma
=============
The default coredump writes VMAs in address order. By setting
``core_sort_vma`` to 1, VMAs will be written from smallest size
to largest size. This is known to break at least elfutils, but
can be handy when dealing with very large (and truncated)
coredumps where the more useful debugging details are included
in the smaller VMAs.
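As a minimal sketch (illustrative only; the path follows from the sysctl
name above), the knob can be flipped from C before provoking a dump::

    #include <fcntl.h>
    #include <unistd.h>

    /* Ask the kernel to write coredump VMAs smallest-first. */
    static int enable_core_sort_vma(void)
    {
        int fd = open("/proc/sys/kernel/core_sort_vma", O_WRONLY);

        if (fd < 0)
            return -1;
        if (write(fd, "1", 1) != 1) {
            close(fd);
            return -1;
        }
        return close(fd);
    }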
core_uses_pid
=============

View File

@ -18,6 +18,7 @@ Introduction
both access system memory directly and with the same effective
addresses.
**This driver is deprecated and will be removed in a future release.**
Hardware overview
=================
@ -453,7 +454,7 @@ Sysfs Class
A cxl sysfs class is added under /sys/class/cxl to facilitate
enumeration and tuning of the accelerators. Its layout is
described in Documentation/ABI/testing/sysfs-class-cxl
described in Documentation/ABI/obsolete/sysfs-class-cxl
Udev rules

View File

@ -25,7 +25,7 @@ to cache translations for virtual addresses. The IOMMU driver uses the
mmu_notifier() support to keep the device TLB cache and the CPU cache in
sync. When an ATS lookup fails for a virtual address, the device should
use the PRI in order to request the virtual address to be paged into the
CPU page tables. The device must use ATS again in order the fetch the
CPU page tables. The device must use ATS again in order to fetch the
translation before use.
Shared Hardware Workqueues
@ -216,7 +216,7 @@ submitting work and processing completions.
Single Root I/O Virtualization (SR-IOV) focuses on providing independent
hardware interfaces for virtualizing hardware. Hence, it's required to be
almost fully functional interface to software supporting the traditional
an almost fully functional interface to software supporting the traditional
BARs, space for interrupts via MSI-X, its own register layout.
Virtual Functions (VFs) are assisted by the Physical Function (PF)
driver.

View File

@ -53,11 +53,17 @@ properties:
reg:
maxItems: 1
power-controller:
type: object
reboot-mode:
type: object
required:
- compatible
- reg
additionalProperties: true
additionalProperties: false
examples:
- |

View File

@ -33,6 +33,10 @@ properties:
clocks:
maxItems: 1
clock-names:
items:
- const: nf_clk
dmas:
maxItems: 1
@ -51,6 +55,7 @@ required:
- reg-names
- interrupts
- clocks
- clock-names
unevaluatedProperties: false
@ -66,7 +71,8 @@ examples:
#address-cells = <1>;
#size-cells = <0>;
interrupts = <GIC_SPI 97 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&nf_clk>;
clocks = <&clk>;
clock-names = "nf_clk";
cdns,board-delay-ps = <4830>;
nand@0 {

View File

@ -63,8 +63,8 @@ what id ``k11000`` corresponds to in the second or third idmapping. The
straightforward algorithm to use is to apply the inverse of the first idmapping,
mapping ``k11000`` up to ``u1000``. Afterwards, we can map ``u1000`` down using
either the second idmapping or the third idmapping. The second
idmapping would map ``u1000`` down to ``21000``. The third idmapping would map
``u1000`` down to ``u31000``.
idmapping would map ``u1000`` down to ``k21000``. The third idmapping would map
``u1000`` down to ``k31000``.
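The per-extent arithmetic is plain offsetting; as a sketch (hypothetical
helpers, mirroring the ``u:k:r`` extent notation used in this document)::

    /* One idmapping extent: userspace ids [u, u+r) <-> kernel ids [k, k+r). */
    struct extent { unsigned int u, k, r; };

    /* Invert a mapping: kernel id back up to a userspace id. */
    static unsigned int map_up(struct extent e, unsigned int kid)
    {
        return e.u + (kid - e.k);   /* k11000 -> u1000 for u1000:k11000:r1 */
    }

    /* Apply a mapping: userspace id down to a kernel id. */
    static unsigned int map_down(struct extent e, unsigned int uid)
    {
        return e.k + (uid - e.u);   /* u1000 -> k21000 for u1000:k21000:r1 */
    }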
If we were given the same task for the following three idmappings::

View File

@ -112,7 +112,7 @@ Functions
Callbacks
=========
There are six callbacks:
There are seven callbacks:
::
@ -182,6 +182,13 @@ There are six callbacks:
the length of the message. skb->len - offset may be greater
than full_len since strparser does not trim the skb.
::
int (*read_sock)(struct strparser *strp, read_descriptor_t *desc,
sk_read_actor_t recv_actor);
The read_sock callback is used by strparser instead of
sock->ops->read_sock, if provided.
::
int (*read_sock_done)(struct strparser *strp, int err);
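For illustration, a minimal (hypothetical) callback set that supplies the
optional read_sock hook; the field names assume the strp_callbacks layout
in include/net/strparser.h::

    #include <net/strparser.h>

    static int my_parse_msg(struct strparser *strp, struct sk_buff *skb)
    {
        return skb->len;   /* treat every skb as one complete message */
    }

    static void my_rcv_msg(struct strparser *strp, struct sk_buff *skb)
    {
        kfree_skb(skb);    /* consume the parsed message */
    }

    /* Feed skbs to the parser ourselves instead of sock->ops->read_sock. */
    static int my_read_sock(struct strparser *strp, read_descriptor_t *desc,
                            sk_read_actor_t recv_actor)
    {
        return 0;
    }

    static const struct strp_callbacks my_cbs = {
        .rcv_msg   = my_rcv_msg,
        .parse_msg = my_parse_msg,
        .read_sock = my_read_sock,
    };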

View File

@ -308,7 +308,7 @@ an involved disclosed party. The current ambassadors list:
Google Kees Cook <keescook@chromium.org>
LLVM Nick Desaulniers <ndesaulniers@google.com>
LLVM Nick Desaulniers <nick.desaulniers+lkml@gmail.com>
============= ========================================================
If you want your organization to be added to the ambassadors list, please

View File

@ -102,6 +102,9 @@ The system wide settings are configured under the /proc virtual file system:
* sched_rt_period_us takes values from 1 to INT_MAX.
* sched_rt_runtime_us takes values from -1 to sched_rt_period_us.
* A run time of -1 specifies runtime == period, i.e. no limit.
* sched_rt_runtime_us/sched_rt_period_us > 0.05 in order to preserve
bandwidth for the fair dl_server. For an accurate value, check the
average of runtime/period in /sys/kernel/debug/sched/fair_server/cpuX/
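For example, with the default sched_rt_period_us of 1,000,000 and
sched_rt_runtime_us of 950,000 the ratio is 0.95; pushing
sched_rt_runtime_us below 50,000 (0.05 x 1,000,000) would leave the fair
dl_server without the bandwidth it needs.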
2.2 Default behaviour

View File

@ -287,7 +287,7 @@ revelada involucrada. La lista de embajadores actuales:
Google Kees Cook <keescook@chromium.org>
LLVM Nick Desaulniers <ndesaulniers@google.com>
LLVM Nick Desaulniers <nick.desaulniers+lkml@gmail.com>
============= ========================================================
Si quiere que su organización se añada a la lista de embajadores, por

View File

@ -8,7 +8,7 @@ Landlock: unprivileged access control
=====================================
:Author: Mickaël Salaün
:Date: October 2024
:Date: January 2025
The goal of Landlock is to enable restriction of ambient rights (e.g. global
filesystem or network access) for a set of processes. Because Landlock
@ -329,11 +329,11 @@ non-sandboxed process, we can specify this restriction with
A sandboxed process can connect to a non-sandboxed process when its domain is
not scoped. If a process's domain is scoped, it can only connect to sockets
created by processes in the same scope.
Moreover, If a process is scoped to send signal to a non-scoped process, it can
Moreover, if a process is scoped to send signal to a non-scoped process, it can
only send signals to processes in the same scope.
A connected datagram socket behaves like a stream socket when its domain is
scoped, meaning if the domain is scoped after the socket is connected , it can
scoped, meaning if the domain is scoped after the socket is connected, it can
still :manpage:`send(2)` data just like a stream socket. However, in the same
scenario, a non-connected datagram socket cannot send data (with
:manpage:`sendto(2)`) outside its scope.
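A minimal sketch of opting in to both scoping restrictions (error handling
trimmed, and the syscalls are invoked through syscall(2) since libc wrappers
may be absent)::

    #include <linux/landlock.h>
    #include <sys/prctl.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    static int restrict_scope(void)
    {
        const struct landlock_ruleset_attr attr = {
            /* Scope both abstract UNIX sockets and signals. */
            .scoped = LANDLOCK_SCOPE_ABSTRACT_UNIX_SOCKET |
                      LANDLOCK_SCOPE_SIGNAL,
        };
        const int fd = syscall(SYS_landlock_create_ruleset, &attr,
                               sizeof(attr), 0);

        if (fd < 0)
            return -1;
        if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0) ||
            syscall(SYS_landlock_restrict_self, fd, 0))
            return -1;
        return close(fd);
    }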

View File

@ -1046,14 +1046,14 @@ F: drivers/crypto/ccp/hsti.*
AMD DISPLAY CORE
M: Harry Wentland <harry.wentland@amd.com>
M: Leo Li <sunpeng.li@amd.com>
M: Rodrigo Siqueira <Rodrigo.Siqueira@amd.com>
R: Rodrigo Siqueira <siqueira@igalia.com>
L: amd-gfx@lists.freedesktop.org
S: Supported
T: git https://gitlab.freedesktop.org/agd5f/linux.git
F: drivers/gpu/drm/amd/display/
AMD DISPLAY CORE - DML
M: Chaitanya Dhere <chaitanya.dhere@amd.com>
M: Austin Zheng <austin.zheng@amd.com>
M: Jun Lei <jun.lei@amd.com>
S: Supported
F: drivers/gpu/drm/amd/display/dc/dml/
@ -2210,6 +2210,7 @@ F: sound/soc/codecs/ssm3515.c
ARM/APPLE MACHINE SUPPORT
M: Sven Peter <sven@svenpeter.dev>
M: Janne Grunau <j@jannau.net>
R: Alyssa Rosenzweig <alyssa@rosenzweig.io>
L: asahi@lists.linux.dev
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@ -2284,7 +2285,7 @@ F: drivers/irqchip/irq-aspeed-i2c-ic.c
ARM/ASPEED MACHINE SUPPORT
M: Joel Stanley <joel@jms.id.au>
R: Andrew Jeffery <andrew@codeconstruct.com.au>
M: Andrew Jeffery <andrew@codeconstruct.com.au>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
L: linux-aspeed@lists.ozlabs.org (moderated for non-subscribers)
S: Supported
@ -2877,7 +2878,7 @@ F: drivers/pinctrl/nxp/
ARM/NXP S32G/S32R DWMAC ETHERNET DRIVER
M: Jan Petrous <jan.petrous@oss.nxp.com>
L: NXP S32 Linux Team <s32@nxp.com>
R: s32@nxp.com
S: Maintained
F: Documentation/devicetree/bindings/net/nxp,s32-dwmac.yaml
F: drivers/net/ethernet/stmicro/stmmac/dwmac-s32.c
@ -5655,7 +5656,7 @@ F: .clang-format
CLANG/LLVM BUILD SUPPORT
M: Nathan Chancellor <nathan@kernel.org>
R: Nick Desaulniers <ndesaulniers@google.com>
R: Nick Desaulniers <nick.desaulniers+lkml@gmail.com>
R: Bill Wendling <morbo@google.com>
R: Justin Stitt <justinstitt@google.com>
L: llvm@lists.linux.dev
@ -5774,6 +5775,7 @@ X: drivers/clk/clkdev.c
COMMON INTERNET FILE SYSTEM CLIENT (CIFS and SMB3)
M: Steve French <sfrench@samba.org>
M: Steve French <smfrench@gmail.com>
R: Paulo Alcantara <pc@manguebit.com> (DFS, global name space)
R: Ronnie Sahlberg <ronniesahlberg@gmail.com> (directory leases, sparse files)
R: Shyam Prasad N <sprasad@microsoft.com> (multichannel)
@ -5855,7 +5857,6 @@ F: Documentation/security/snp-tdx-threat-model.rst
CONFIGFS
M: Joel Becker <jlbec@evilplan.org>
M: Christoph Hellwig <hch@lst.de>
S: Supported
T: git git://git.infradead.org/users/hch/configfs.git
F: fs/configfs/
@ -5926,6 +5927,17 @@ F: tools/testing/selftests/cgroup/test_cpuset.c
F: tools/testing/selftests/cgroup/test_cpuset_prs.sh
F: tools/testing/selftests/cgroup/test_cpuset_v1_base.sh
CONTROL GROUP - DEVICE MEMORY CONTROLLER (DMEM)
M: Maarten Lankhorst <dev@lankhorst.se>
M: Maxime Ripard <mripard@kernel.org>
M: Natalie Vock <natalie.vock@gmx.de>
L: cgroups@vger.kernel.org
L: dri-devel@lists.freedesktop.org
S: Maintained
T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: include/linux/cgroup_dmem.h
F: kernel/cgroup/dmem.c
CONTROL GROUP - MEMORY RESOURCE CONTROLLER (MEMCG)
M: Johannes Weiner <hannes@cmpxchg.org>
M: Michal Hocko <mhocko@kernel.org>
@ -6878,7 +6890,6 @@ F: kernel/dma/map_benchmark.c
F: tools/testing/selftests/dma/
DMA MAPPING HELPERS
M: Christoph Hellwig <hch@lst.de>
M: Marek Szyprowski <m.szyprowski@samsung.com>
R: Robin Murphy <robin.murphy@arm.com>
L: iommu@lists.linux.dev
@ -7425,7 +7436,6 @@ F: Documentation/devicetree/bindings/display/panel/novatek,nt36672a.yaml
F: drivers/gpu/drm/panel/panel-novatek-nt36672a.c
DRM DRIVER FOR NVIDIA GEFORCE/QUADRO GPUS
M: Karol Herbst <kherbst@redhat.com>
M: Lyude Paul <lyude@redhat.com>
M: Danilo Krummrich <dakr@kernel.org>
L: dri-devel@lists.freedesktop.org
@ -9829,8 +9839,7 @@ F: drivers/input/touchscreen/goodix*
GOOGLE ETHERNET DRIVERS
M: Jeroen de Borst <jeroendb@google.com>
M: Praveen Kaligineedi <pkaligineedi@google.com>
R: Shailend Chand <shailend@google.com>
M: Harshitha Ramamurthy <hramamurthy@google.com>
L: netdev@vger.kernel.org
S: Maintained
F: Documentation/networking/device_drivers/ethernet/google/gve.rst
@ -12647,7 +12656,9 @@ F: tools/testing/selftests/
KERNEL SMB3 SERVER (KSMBD)
M: Namjae Jeon <linkinjeon@kernel.org>
M: Namjae Jeon <linkinjeon@samba.org>
M: Steve French <sfrench@samba.org>
M: Steve French <smfrench@gmail.com>
R: Sergey Senozhatsky <senozhatsky@chromium.org>
R: Tom Talpey <tom@talpey.com>
L: linux-cifs@vger.kernel.org
@ -15683,7 +15694,7 @@ F: include/uapi/linux/cciss*.h
MICROSOFT MANA RDMA DRIVER
M: Long Li <longli@microsoft.com>
M: Ajay Sharma <sharmaajay@microsoft.com>
M: Konstantin Taranov <kotaranov@microsoft.com>
L: linux-rdma@vger.kernel.org
S: Supported
F: drivers/infiniband/hw/mana/
@ -16472,6 +16483,12 @@ F: net/ethtool/cabletest.c
F: tools/testing/selftests/drivers/net/*/ethtool*
K: cable_test
NETWORKING [ETHTOOL MAC MERGE]
M: Vladimir Oltean <vladimir.oltean@nxp.com>
F: net/ethtool/mm.c
F: tools/testing/selftests/drivers/net/hw/ethtool_mm.sh
K: ethtool_mm
NETWORKING [GENERAL]
M: "David S. Miller" <davem@davemloft.net>
M: Eric Dumazet <edumazet@google.com>
@ -19652,7 +19669,6 @@ F: drivers/net/wireless/quantenna
RADEON and AMDGPU DRM DRIVERS
M: Alex Deucher <alexander.deucher@amd.com>
M: Christian König <christian.koenig@amd.com>
M: Xinhui Pan <Xinhui.Pan@amd.com>
L: amd-gfx@lists.freedesktop.org
S: Supported
B: https://gitlab.freedesktop.org/drm/amd/-/issues
@ -19874,7 +19890,7 @@ F: net/rds/
F: tools/testing/selftests/net/rds/
RDT - RESOURCE ALLOCATION
M: Fenghua Yu <fenghua.yu@intel.com>
M: Tony Luck <tony.luck@intel.com>
M: Reinette Chatre <reinette.chatre@intel.com>
L: linux-kernel@vger.kernel.org
S: Supported
@ -20325,6 +20341,7 @@ RISC-V ARCHITECTURE
M: Paul Walmsley <paul.walmsley@sifive.com>
M: Palmer Dabbelt <palmer@dabbelt.com>
M: Albert Ou <aou@eecs.berkeley.edu>
R: Alexandre Ghiti <alex@ghiti.fr>
L: linux-riscv@lists.infradead.org
S: Supported
Q: https://patchwork.kernel.org/project/linux-riscv/list/
@ -21918,10 +21935,13 @@ F: sound/soc/uniphier/
SOCKET TIMESTAMPING
M: Willem de Bruijn <willemdebruijn.kernel@gmail.com>
R: Jason Xing <kernelxing@tencent.com>
S: Maintained
F: Documentation/networking/timestamping.rst
F: include/linux/net_tstamp.h
F: include/uapi/linux/net_tstamp.h
F: tools/testing/selftests/bpf/*/net_timestamping*
F: tools/testing/selftests/net/*timestamp*
F: tools/testing/selftests/net/so_txtime.c
SOEKRIS NET48XX LED SUPPORT
@ -24064,7 +24084,6 @@ F: tools/testing/selftests/ftrace/
TRACING MMIO ACCESSES (MMIOTRACE)
M: Steven Rostedt <rostedt@goodmis.org>
M: Masami Hiramatsu <mhiramat@kernel.org>
R: Karol Herbst <karolherbst@gmail.com>
R: Pekka Paalanen <ppaalanen@gmail.com>
L: linux-kernel@vger.kernel.org
L: nouveau@lists.freedesktop.org

View File

@ -2,7 +2,7 @@
VERSION = 6
PATCHLEVEL = 14
SUBLEVEL = 0
EXTRAVERSION = -rc3
EXTRAVERSION = -rc5
NAME = Baby Opossum Posse
# *DOCUMENTATION*

View File

@ -226,7 +226,6 @@
};
&uart5 {
pinctrl-0 = <&uart5_xfer>;
rts-gpios = <&gpio0 RK_PB5 GPIO_ACTIVE_HIGH>;
status = "okay";
};

View File

@ -396,6 +396,12 @@
status = "okay";
};
&uart5 {
/delete-property/ dmas;
/delete-property/ dma-names;
pinctrl-0 = <&uart5_xfer>;
};
/* Mule UCAN */
&usb_host0_ehci {
status = "okay";

View File

@ -17,8 +17,7 @@
&gmac2io {
phy-handle = <&yt8531c>;
tx_delay = <0x19>;
rx_delay = <0x05>;
phy-mode = "rgmii-id";
status = "okay";
mdio {

View File

@ -15,6 +15,7 @@
&gmac2io {
phy-handle = <&rtl8211e>;
phy-mode = "rgmii";
tx_delay = <0x24>;
rx_delay = <0x18>;
status = "okay";

View File

@ -109,7 +109,6 @@
assigned-clocks = <&cru SCLK_MAC2IO>, <&cru SCLK_MAC2IO_EXT>;
assigned-clock-parents = <&gmac_clk>, <&gmac_clk>;
clock_in_out = "input";
phy-mode = "rgmii";
phy-supply = <&vcc_io>;
pinctrl-0 = <&rgmiim1_pins>;
pinctrl-names = "default";

View File

@ -22,11 +22,11 @@
};
/* EC turns on w/ pp900_usb_en */
pp900_usb: pp900-ap {
pp900_usb: regulator-pp900-ap {
};
/* EC turns on w/ pp900_pcie_en */
pp900_pcie: pp900-ap {
pp900_pcie: regulator-pp900-ap {
};
pp3000: regulator-pp3000 {
@ -126,7 +126,7 @@
};
/* Always on; plain and simple */
pp3000_ap: pp3000_emmc: pp3000 {
pp3000_ap: pp3000_emmc: regulator-pp3000 {
};
pp1500_ap_io: regulator-pp1500-ap-io {
@ -160,7 +160,7 @@
};
/* EC turns on w/ pp3300_usb_en_l */
pp3300_usb: pp3300 {
pp3300_usb: regulator-pp3300 {
};
/* gpio is shared with pp1800_pcie and pinctrl is set there */

View File

@ -92,7 +92,7 @@
};
/* EC turns on pp1800_s3_en */
pp1800_s3: pp1800 {
pp1800_s3: regulator-pp1800 {
};
/* pp3300 children, sorted by name */
@ -109,11 +109,11 @@
};
/* EC turns on pp3300_s0_en */
pp3300_s0: pp3300 {
pp3300_s0: regulator-pp3300 {
};
/* EC turns on pp3300_s3_en */
pp3300_s3: pp3300 {
pp3300_s3: regulator-pp3300 {
};
/*

View File

@ -189,39 +189,39 @@
};
/* EC turns on w/ pp900_ddrpll_en */
pp900_ddrpll: pp900-ap {
pp900_ddrpll: regulator-pp900-ap {
};
/* EC turns on w/ pp900_pll_en */
pp900_pll: pp900-ap {
pp900_pll: regulator-pp900-ap {
};
/* EC turns on w/ pp900_pmu_en */
pp900_pmu: pp900-ap {
pp900_pmu: regulator-pp900-ap {
};
/* EC turns on w/ pp1800_s0_en_l */
pp1800_ap_io: pp1800_emmc: pp1800_nfc: pp1800_s0: pp1800 {
pp1800_ap_io: pp1800_emmc: pp1800_nfc: pp1800_s0: regulator-pp1800 {
};
/* EC turns on w/ pp1800_avdd_en_l */
pp1800_avdd: pp1800 {
pp1800_avdd: regulator-pp1800 {
};
/* EC turns on w/ pp1800_lid_en_l */
pp1800_lid: pp1800_mic: pp1800 {
pp1800_lid: pp1800_mic: regulator-pp1800 {
};
/* EC turns on w/ lpddr_pwr_en */
pp1800_lpddr: pp1800 {
pp1800_lpddr: regulator-pp1800 {
};
/* EC turns on w/ pp1800_pmu_en_l */
pp1800_pmu: pp1800 {
pp1800_pmu: regulator-pp1800 {
};
/* EC turns on w/ pp1800_usb_en_l */
pp1800_usb: pp1800 {
pp1800_usb: regulator-pp1800 {
};
pp3000_sd_slot: regulator-pp3000-sd-slot {
@ -259,11 +259,11 @@
};
/* EC turns on w/ pp3300_trackpad_en_l */
pp3300_trackpad: pp3300-trackpad {
pp3300_trackpad: regulator-pp3300-trackpad {
};
/* EC turns on w/ usb_a_en */
pp5000_usb_a_vbus: pp5000 {
pp5000_usb_a_vbus: regulator-pp5000 {
};
ap_rtc_clk: ap-rtc-clk {

View File

@ -549,10 +549,10 @@
mmu600_pcie: iommu@fc900000 {
compatible = "arm,smmu-v3";
reg = <0x0 0xfc900000 0x0 0x200000>;
interrupts = <GIC_SPI 369 IRQ_TYPE_LEVEL_HIGH 0>,
<GIC_SPI 371 IRQ_TYPE_LEVEL_HIGH 0>,
<GIC_SPI 374 IRQ_TYPE_LEVEL_HIGH 0>,
<GIC_SPI 367 IRQ_TYPE_LEVEL_HIGH 0>;
interrupts = <GIC_SPI 369 IRQ_TYPE_EDGE_RISING 0>,
<GIC_SPI 371 IRQ_TYPE_EDGE_RISING 0>,
<GIC_SPI 374 IRQ_TYPE_EDGE_RISING 0>,
<GIC_SPI 367 IRQ_TYPE_EDGE_RISING 0>;
interrupt-names = "eventq", "gerror", "priq", "cmdq-sync";
#iommu-cells = <1>;
};
@ -560,10 +560,10 @@
mmu600_php: iommu@fcb00000 {
compatible = "arm,smmu-v3";
reg = <0x0 0xfcb00000 0x0 0x200000>;
interrupts = <GIC_SPI 381 IRQ_TYPE_LEVEL_HIGH 0>,
<GIC_SPI 383 IRQ_TYPE_LEVEL_HIGH 0>,
<GIC_SPI 386 IRQ_TYPE_LEVEL_HIGH 0>,
<GIC_SPI 379 IRQ_TYPE_LEVEL_HIGH 0>;
interrupts = <GIC_SPI 381 IRQ_TYPE_EDGE_RISING 0>,
<GIC_SPI 383 IRQ_TYPE_EDGE_RISING 0>,
<GIC_SPI 386 IRQ_TYPE_EDGE_RISING 0>,
<GIC_SPI 379 IRQ_TYPE_EDGE_RISING 0>;
interrupt-names = "eventq", "gerror", "priq", "cmdq-sync";
#iommu-cells = <1>;
status = "disabled";
@ -2668,9 +2668,9 @@
rockchip,hw-tshut-temp = <120000>;
rockchip,hw-tshut-mode = <0>; /* tshut mode 0:CRU 1:GPIO */
rockchip,hw-tshut-polarity = <0>; /* tshut polarity 0:LOW 1:HIGH */
pinctrl-0 = <&tsadc_gpio_func>;
pinctrl-1 = <&tsadc_shut>;
pinctrl-names = "gpio", "otpout";
pinctrl-0 = <&tsadc_shut_org>;
pinctrl-1 = <&tsadc_gpio_func>;
pinctrl-names = "default", "sleep";
#thermal-sensor-cells = <1>;
status = "disabled";
};

View File

@ -113,7 +113,7 @@
compatible = "regulator-fixed";
regulator-name = "vcc3v3_lcd";
enable-active-high;
gpio = <&gpio1 RK_PC4 GPIO_ACTIVE_HIGH>;
gpio = <&gpio0 RK_PC4 GPIO_ACTIVE_HIGH>;
pinctrl-names = "default";
pinctrl-0 = <&lcdpwr_en>;
vin-supply = <&vcc3v3_sys>;
@ -241,7 +241,7 @@
&pinctrl {
lcd {
lcdpwr_en: lcdpwr-en {
rockchip,pins = <1 RK_PC4 RK_FUNC_GPIO &pcfg_pull_down>;
rockchip,pins = <0 RK_PC4 RK_FUNC_GPIO &pcfg_pull_down>;
};
bl_en: bl-en {

View File

@ -213,7 +213,6 @@
interrupt-names = "sys", "pmc", "msg", "legacy", "err",
"dma0", "dma1", "dma2", "dma3";
max-link-speed = <3>;
iommus = <&mmu600_pcie 0x0000>;
num-lanes = <4>;
phys = <&pcie30phy>;
phy-names = "pcie-phy";

View File

@ -23,3 +23,7 @@
vpcie3v3-supply = <&vcc3v3_pcie30>;
status = "okay";
};
&mmu600_pcie {
status = "disabled";
};

View File

@ -1551,6 +1551,8 @@ CONFIG_PWM_VISCONTI=m
CONFIG_SL28CPLD_INTC=y
CONFIG_QCOM_PDC=y
CONFIG_QCOM_MPM=y
CONFIG_TI_SCI_INTR_IRQCHIP=y
CONFIG_TI_SCI_INTA_IRQCHIP=y
CONFIG_RESET_GPIO=m
CONFIG_RESET_IMX7=y
CONFIG_RESET_QCOM_AOSS=y

View File

@ -42,8 +42,8 @@ extern int huge_ptep_set_access_flags(struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep,
pte_t pte, int dirty);
#define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR
extern pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
unsigned long addr, pte_t *ptep);
extern pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, unsigned long sz);
#define __HAVE_ARCH_HUGE_PTEP_SET_WRPROTECT
extern void huge_ptep_set_wrprotect(struct mm_struct *mm,
unsigned long addr, pte_t *ptep);
@ -76,12 +76,22 @@ static inline void flush_hugetlb_tlb_range(struct vm_area_struct *vma,
{
unsigned long stride = huge_page_size(hstate_vma(vma));
if (stride == PMD_SIZE)
__flush_tlb_range(vma, start, end, stride, false, 2);
else if (stride == PUD_SIZE)
__flush_tlb_range(vma, start, end, stride, false, 1);
else
__flush_tlb_range(vma, start, end, PAGE_SIZE, false, 0);
switch (stride) {
#ifndef __PAGETABLE_PMD_FOLDED
case PUD_SIZE:
__flush_tlb_range(vma, start, end, PUD_SIZE, false, 1);
break;
#endif
case CONT_PMD_SIZE:
case PMD_SIZE:
__flush_tlb_range(vma, start, end, PMD_SIZE, false, 2);
break;
case CONT_PTE_SIZE:
__flush_tlb_range(vma, start, end, PAGE_SIZE, false, 3);
break;
default:
__flush_tlb_range(vma, start, end, PAGE_SIZE, false, TLBI_TTL_UNKNOWN);
}
}
#endif /* __ASM_HUGETLB_H */

View File

@ -119,7 +119,7 @@
#define TCR_EL2_IRGN0_MASK TCR_IRGN0_MASK
#define TCR_EL2_T0SZ_MASK 0x3f
#define TCR_EL2_MASK (TCR_EL2_TG0_MASK | TCR_EL2_SH0_MASK | \
TCR_EL2_ORGN0_MASK | TCR_EL2_IRGN0_MASK | TCR_EL2_T0SZ_MASK)
TCR_EL2_ORGN0_MASK | TCR_EL2_IRGN0_MASK)
/* VTCR_EL2 Registers bits */
#define VTCR_EL2_DS TCR_EL2_DS

View File

@ -1259,7 +1259,7 @@ int kvm_arm_pvtime_has_attr(struct kvm_vcpu *vcpu,
extern unsigned int __ro_after_init kvm_arm_vmid_bits;
int __init kvm_arm_vmid_alloc_init(void);
void __init kvm_arm_vmid_alloc_free(void);
bool kvm_arm_vmid_update(struct kvm_vmid *kvm_vmid);
void kvm_arm_vmid_update(struct kvm_vmid *kvm_vmid);
void kvm_arm_vmid_clear_active(void);
static inline void kvm_arm_pvtime_vcpu_init(struct kvm_vcpu_arch *vcpu_arch)

View File

@ -559,6 +559,16 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
mmu = vcpu->arch.hw_mmu;
last_ran = this_cpu_ptr(mmu->last_vcpu_ran);
/*
* Ensure a VMID is allocated for the MMU before programming VTTBR_EL2,
* which happens eagerly in VHE.
*
* Also, the VMID allocator only preserves VMIDs that are active at the
* time of rollover, so KVM might need to grab a new VMID for the MMU if
* this is called from kvm_sched_in().
*/
kvm_arm_vmid_update(&mmu->vmid);
/*
* We guarantee that both TLBs and I-cache are private to each
* vcpu. If detecting that a vcpu from the same VM has
@ -1138,18 +1148,6 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
*/
preempt_disable();
/*
* The VMID allocator only tracks active VMIDs per
* physical CPU, and therefore the VMID allocated may not be
* preserved on VMID roll-over if the task was preempted,
* making a thread's VMID inactive. So we need to call
* kvm_arm_vmid_update() in non-premptible context.
*/
if (kvm_arm_vmid_update(&vcpu->arch.hw_mmu->vmid) &&
has_vhe())
__load_stage2(vcpu->arch.hw_mmu,
vcpu->arch.hw_mmu->arch);
kvm_pmu_flush_hwstate(vcpu);
local_irq_disable();
@ -1980,7 +1978,7 @@ static int kvm_init_vector_slots(void)
static void __init cpu_prepare_hyp_mode(int cpu, u32 hyp_va_bits)
{
struct kvm_nvhe_init_params *params = per_cpu_ptr_nvhe_sym(kvm_init_params, cpu);
unsigned long tcr, ips;
unsigned long tcr;
/*
* Calculate the raw per-cpu offset without a translation from the
@ -1994,19 +1992,18 @@ static void __init cpu_prepare_hyp_mode(int cpu, u32 hyp_va_bits)
params->mair_el2 = read_sysreg(mair_el1);
tcr = read_sysreg(tcr_el1);
ips = FIELD_GET(TCR_IPS_MASK, tcr);
if (cpus_have_final_cap(ARM64_KVM_HVHE)) {
tcr &= ~(TCR_HD | TCR_HA | TCR_A1 | TCR_T0SZ_MASK);
tcr |= TCR_EPD1_MASK;
} else {
unsigned long ips = FIELD_GET(TCR_IPS_MASK, tcr);
tcr &= TCR_EL2_MASK;
tcr |= TCR_EL2_RES1;
tcr |= TCR_EL2_RES1 | FIELD_PREP(TCR_EL2_PS_MASK, ips);
if (lpa2_is_enabled())
tcr |= TCR_EL2_DS;
}
tcr &= ~TCR_T0SZ_MASK;
tcr |= TCR_T0SZ(hyp_va_bits);
tcr &= ~TCR_EL2_PS_MASK;
tcr |= FIELD_PREP(TCR_EL2_PS_MASK, ips);
if (lpa2_is_enabled())
tcr |= TCR_EL2_DS;
params->tcr_el2 = tcr;
params->pgd_pa = kvm_mmu_get_httbr();

View File

@ -135,11 +135,10 @@ void kvm_arm_vmid_clear_active(void)
atomic64_set(this_cpu_ptr(&active_vmids), VMID_ACTIVE_INVALID);
}
bool kvm_arm_vmid_update(struct kvm_vmid *kvm_vmid)
void kvm_arm_vmid_update(struct kvm_vmid *kvm_vmid)
{
unsigned long flags;
u64 vmid, old_active_vmid;
bool updated = false;
vmid = atomic64_read(&kvm_vmid->id);
@ -157,21 +156,17 @@ bool kvm_arm_vmid_update(struct kvm_vmid *kvm_vmid)
if (old_active_vmid != 0 && vmid_gen_match(vmid) &&
0 != atomic64_cmpxchg_relaxed(this_cpu_ptr(&active_vmids),
old_active_vmid, vmid))
return false;
return;
raw_spin_lock_irqsave(&cpu_vmid_lock, flags);
/* Check that our VMID belongs to the current generation. */
vmid = atomic64_read(&kvm_vmid->id);
if (!vmid_gen_match(vmid)) {
if (!vmid_gen_match(vmid))
vmid = new_vmid(kvm_vmid);
updated = true;
}
atomic64_set(this_cpu_ptr(&active_vmids), vmid);
raw_spin_unlock_irqrestore(&cpu_vmid_lock, flags);
return updated;
}
/*

View File

@ -100,20 +100,11 @@ static int find_num_contig(struct mm_struct *mm, unsigned long addr,
static inline int num_contig_ptes(unsigned long size, size_t *pgsize)
{
int contig_ptes = 0;
int contig_ptes = 1;
*pgsize = size;
switch (size) {
#ifndef __PAGETABLE_PMD_FOLDED
case PUD_SIZE:
if (pud_sect_supported())
contig_ptes = 1;
break;
#endif
case PMD_SIZE:
contig_ptes = 1;
break;
case CONT_PMD_SIZE:
*pgsize = PMD_SIZE;
contig_ptes = CONT_PMDS;
@ -122,6 +113,8 @@ static inline int num_contig_ptes(unsigned long size, size_t *pgsize)
*pgsize = PAGE_SIZE;
contig_ptes = CONT_PTES;
break;
default:
WARN_ON(!__hugetlb_valid_size(size));
}
return contig_ptes;
@ -163,24 +156,23 @@ static pte_t get_clear_contig(struct mm_struct *mm,
unsigned long pgsize,
unsigned long ncontig)
{
pte_t orig_pte = __ptep_get(ptep);
unsigned long i;
pte_t pte, tmp_pte;
bool present;
for (i = 0; i < ncontig; i++, addr += pgsize, ptep++) {
pte_t pte = __ptep_get_and_clear(mm, addr, ptep);
/*
* If HW_AFDBM is enabled, then the HW could turn on
* the dirty or accessed bit for any page in the set,
* so check them all.
*/
if (pte_dirty(pte))
orig_pte = pte_mkdirty(orig_pte);
if (pte_young(pte))
orig_pte = pte_mkyoung(orig_pte);
pte = __ptep_get_and_clear(mm, addr, ptep);
present = pte_present(pte);
while (--ncontig) {
ptep++;
addr += pgsize;
tmp_pte = __ptep_get_and_clear(mm, addr, ptep);
if (present) {
if (pte_dirty(tmp_pte))
pte = pte_mkdirty(pte);
if (pte_young(tmp_pte))
pte = pte_mkyoung(pte);
}
}
return orig_pte;
return pte;
}
static pte_t get_clear_contig_flush(struct mm_struct *mm,
@ -396,18 +388,13 @@ void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
__pte_clear(mm, addr, ptep);
}
pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
unsigned long addr, pte_t *ptep)
pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, unsigned long sz)
{
int ncontig;
size_t pgsize;
pte_t orig_pte = __ptep_get(ptep);
if (!pte_cont(orig_pte))
return __ptep_get_and_clear(mm, addr, ptep);
ncontig = find_num_contig(mm, addr, ptep, &pgsize);
ncontig = num_contig_ptes(sz, &pgsize);
return get_clear_contig(mm, addr, ptep, pgsize, ncontig);
}
@ -549,6 +536,8 @@ bool __init arch_hugetlb_valid_size(unsigned long size)
pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
{
unsigned long psize = huge_page_size(hstate_vma(vma));
if (alternative_has_cap_unlikely(ARM64_WORKAROUND_2645198)) {
/*
* Break-before-make (BBM) is required for all user space mappings
@ -558,7 +547,7 @@ pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr
if (pte_user_exec(__ptep_get(ptep)))
return huge_ptep_clear_flush(vma, addr, ptep);
}
return huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
return huge_ptep_get_and_clear(vma->vm_mm, addr, ptep, psize);
}
void huge_ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep,

View File

@ -279,12 +279,7 @@ void __init arm64_memblock_init(void)
if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
extern u16 memstart_offset_seed;
/*
* Use the sanitised version of id_aa64mmfr0_el1 so that linear
* map randomization can be enabled by shrinking the IPA space.
*/
u64 mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
u64 mmfr0 = read_cpuid(ID_AA64MMFR0_EL1);
int parange = cpuid_feature_extract_unsigned_field(
mmfr0, ID_AA64MMFR0_EL1_PARANGE_SHIFT);
s64 range = linear_region_size -

View File

@ -36,7 +36,8 @@ static inline void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
#define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR
static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
unsigned long addr, pte_t *ptep)
unsigned long addr, pte_t *ptep,
unsigned long sz)
{
pte_t clear;
pte_t pte = ptep_get(ptep);
@ -51,8 +52,9 @@ static inline pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep)
{
pte_t pte;
unsigned long sz = huge_page_size(hstate_vma(vma));
pte = huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
pte = huge_ptep_get_and_clear(vma->vm_mm, addr, ptep, sz);
flush_tlb_page(vma, addr);
return pte;
}

View File

@ -468,6 +468,8 @@ static void walk_relocs(int (*process)(struct section *sec, Elf_Rel *rel,
Elf_Sym *sym, const char *symname))
{
int i;
struct section *extab_sec = sec_lookup("__ex_table");
int extab_index = extab_sec ? extab_sec - secs : -1;
/* Walk through the relocations */
for (i = 0; i < ehdr.e_shnum; i++) {
@ -480,6 +482,9 @@ static void walk_relocs(int (*process)(struct section *sec, Elf_Rel *rel,
if (sec->shdr.sh_type != SHT_REL_TYPE)
continue;
if (sec->shdr.sh_info == extab_index)
continue;
sec_symtab = sec->link;
sec_applies = &secs[sec->shdr.sh_info];
if (!(sec_applies->shdr.sh_flags & SHF_ALLOC))

View File

@ -27,7 +27,8 @@ static inline int prepare_hugepage_range(struct file *file,
#define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR
static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
unsigned long addr, pte_t *ptep)
unsigned long addr, pte_t *ptep,
unsigned long sz)
{
pte_t clear;
pte_t pte = *ptep;
@ -42,13 +43,14 @@ static inline pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep)
{
pte_t pte;
unsigned long sz = huge_page_size(hstate_vma(vma));
/*
* clear the huge pte entry firstly, so that the other smp threads will
* not get old pte entry after finishing flush_tlb_page and before
* setting new huge pte entry
*/
pte = huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
pte = huge_ptep_get_and_clear(vma->vm_mm, addr, ptep, sz);
flush_tlb_page(vma, addr);
return pte;
}

View File

@ -10,7 +10,7 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
#define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR
pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
pte_t *ptep);
pte_t *ptep, unsigned long sz);
#define __HAVE_ARCH_HUGE_PTEP_CLEAR_FLUSH
static inline pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,

View File

@ -126,7 +126,7 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
pte_t *ptep)
pte_t *ptep, unsigned long sz)
{
pte_t entry;

View File

@ -77,9 +77,17 @@
/*
* With 4K page size the real_pte machinery is all nops.
*/
#define __real_pte(e, p, o) ((real_pte_t){(e)})
static inline real_pte_t __real_pte(pte_t pte, pte_t *ptep, int offset)
{
return (real_pte_t){pte};
}
#define __rpte_to_pte(r) ((r).pte)
#define __rpte_to_hidx(r,index) (pte_val(__rpte_to_pte(r)) >> H_PAGE_F_GIX_SHIFT)
static inline unsigned long __rpte_to_hidx(real_pte_t rpte, unsigned long index)
{
return pte_val(__rpte_to_pte(rpte)) >> H_PAGE_F_GIX_SHIFT;
}
#define pte_iterate_hashed_subpages(rpte, psize, va, index, shift) \
do { \

View File

@ -45,7 +45,8 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
#define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR
static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
unsigned long addr, pte_t *ptep)
unsigned long addr, pte_t *ptep,
unsigned long sz)
{
return __pte(pte_update(mm, addr, ptep, ~0UL, 0, 1));
}
@ -55,8 +56,9 @@ static inline pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep)
{
pte_t pte;
unsigned long sz = huge_page_size(hstate_vma(vma));
pte = huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
pte = huge_ptep_get_and_clear(vma->vm_mm, addr, ptep, sz);
flush_hugetlb_page(vma, addr);
return pte;
}

View File

@ -108,7 +108,7 @@ static int text_area_cpu_up(unsigned int cpu)
unsigned long addr;
int err;
area = get_vm_area(PAGE_SIZE, VM_ALLOC);
area = get_vm_area(PAGE_SIZE, 0);
if (!area) {
WARN_ONCE(1, "Failed to create text area for cpu %d\n",
cpu);
@ -493,7 +493,9 @@ static int __do_patch_instructions_mm(u32 *addr, u32 *code, size_t len, bool rep
orig_mm = start_using_temp_mm(patching_mm);
kasan_disable_current();
err = __patch_instructions(patch_addr, code, len, repeat_instr);
kasan_enable_current();
/* context synchronisation performed by __patch_instructions */
stop_using_temp_mm(patching_mm, orig_mm);

View File

@ -231,7 +231,7 @@
__arch_cmpxchg(".w", ".w" sc_sfx, ".w" cas_sfx, \
sc_prepend, sc_append, \
cas_prepend, cas_append, \
__ret, __ptr, (long), __old, __new); \
__ret, __ptr, (long)(int)(long), __old, __new); \
break; \
case 8: \
__arch_cmpxchg(".d", ".d" sc_sfx, ".d" cas_sfx, \

View File

@ -93,7 +93,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
_ASM_EXTABLE_UACCESS_ERR(1b, 3b, %[r]) \
_ASM_EXTABLE_UACCESS_ERR(2b, 3b, %[r]) \
: [r] "+r" (ret), [v] "=&r" (val), [u] "+m" (*uaddr), [t] "=&r" (tmp)
: [ov] "Jr" (oldval), [nv] "Jr" (newval)
: [ov] "Jr" ((long)(int)oldval), [nv] "Jr" (newval)
: "memory");
__disable_user_access();

View File

@ -28,7 +28,8 @@ void set_huge_pte_at(struct mm_struct *mm,
#define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR
pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
unsigned long addr, pte_t *ptep);
unsigned long addr, pte_t *ptep,
unsigned long sz);
#define __HAVE_ARCH_HUGE_PTEP_CLEAR_FLUSH
pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,

View File

@ -108,11 +108,11 @@ int populate_cache_leaves(unsigned int cpu)
if (!np)
return -ENOENT;
if (of_property_read_bool(np, "cache-size"))
if (of_property_present(np, "cache-size"))
ci_leaf_init(this_leaf++, CACHE_TYPE_UNIFIED, level);
if (of_property_read_bool(np, "i-cache-size"))
if (of_property_present(np, "i-cache-size"))
ci_leaf_init(this_leaf++, CACHE_TYPE_INST, level);
if (of_property_read_bool(np, "d-cache-size"))
if (of_property_present(np, "d-cache-size"))
ci_leaf_init(this_leaf++, CACHE_TYPE_DATA, level);
prev = np;
@ -125,11 +125,11 @@ int populate_cache_leaves(unsigned int cpu)
break;
if (level <= levels)
break;
if (of_property_read_bool(np, "cache-size"))
if (of_property_present(np, "cache-size"))
ci_leaf_init(this_leaf++, CACHE_TYPE_UNIFIED, level);
if (of_property_read_bool(np, "i-cache-size"))
if (of_property_present(np, "i-cache-size"))
ci_leaf_init(this_leaf++, CACHE_TYPE_INST, level);
if (of_property_read_bool(np, "d-cache-size"))
if (of_property_present(np, "d-cache-size"))
ci_leaf_init(this_leaf++, CACHE_TYPE_DATA, level);
levels = level;
}

View File

@ -479,7 +479,7 @@ static void __init riscv_resolve_isa(unsigned long *source_isa,
if (bit < RISCV_ISA_EXT_BASE)
*this_hwcap |= isa2hwcap[bit];
}
} while (loop && memcmp(prev_resolved_isa, resolved_isa, sizeof(prev_resolved_isa)));
} while (loop && !bitmap_equal(prev_resolved_isa, resolved_isa, RISCV_ISA_EXT_MAX));
}
static void __init match_isa_ext(const char *name, const char *name_end, unsigned long *bitmap)

View File

@ -322,8 +322,8 @@ void __init setup_arch(char **cmdline_p)
riscv_init_cbo_blocksizes();
riscv_fill_hwcap();
init_rt_signal_env();
apply_boot_alternatives();
init_rt_signal_env();
if (IS_ENABLED(CONFIG_RISCV_ISA_ZICBOM) &&
riscv_isa_extension_available(NULL, ZICBOM))

View File

@ -215,12 +215,6 @@ static size_t get_rt_frame_size(bool cal_all)
if (cal_all || riscv_v_vstate_query(task_pt_regs(current)))
total_context_size += riscv_v_sc_size;
}
/*
* Preserved a __riscv_ctx_hdr for END signal context header if an
* extension uses __riscv_extra_ext_header
*/
if (total_context_size)
total_context_size += sizeof(struct __riscv_ctx_hdr);
frame_size += total_context_size;

View File

@ -974,7 +974,6 @@ int kvm_riscv_vcpu_aia_imsic_inject(struct kvm_vcpu *vcpu,
if (imsic->vsfile_cpu >= 0) {
writel(iid, imsic->vsfile_va + IMSIC_MMIO_SETIPNUM_LE);
kvm_vcpu_kick(vcpu);
} else {
eix = &imsic->swfile->eix[iid / BITS_PER_TYPE(u64)];
set_bit(iid & (BITS_PER_TYPE(u64) - 1), eix->eip);

View File

@ -9,6 +9,7 @@
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/wordpart.h>
#include <asm/sbi.h>
#include <asm/kvm_vcpu_sbi.h>
@ -79,12 +80,12 @@ static int kvm_sbi_hsm_vcpu_get_status(struct kvm_vcpu *vcpu)
target_vcpu = kvm_get_vcpu_by_id(vcpu->kvm, target_vcpuid);
if (!target_vcpu)
return SBI_ERR_INVALID_PARAM;
if (!kvm_riscv_vcpu_stopped(target_vcpu))
return SBI_HSM_STATE_STARTED;
else if (vcpu->stat.generic.blocking)
if (kvm_riscv_vcpu_stopped(target_vcpu))
return SBI_HSM_STATE_STOPPED;
else if (target_vcpu->stat.generic.blocking)
return SBI_HSM_STATE_SUSPENDED;
else
return SBI_HSM_STATE_STOPPED;
return SBI_HSM_STATE_STARTED;
}
static int kvm_sbi_ext_hsm_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
@ -109,7 +110,7 @@ static int kvm_sbi_ext_hsm_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
}
return 0;
case SBI_EXT_HSM_HART_SUSPEND:
switch (cp->a0) {
switch (lower_32_bits(cp->a0)) {
case SBI_HSM_SUSPEND_RET_DEFAULT:
kvm_riscv_vcpu_wfi(vcpu);
break;

View File

@ -21,7 +21,7 @@ static int kvm_sbi_ext_time_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
u64 next_cycle;
if (cp->a6 != SBI_EXT_TIME_SET_TIMER) {
retdata->err_val = SBI_ERR_INVALID_PARAM;
retdata->err_val = SBI_ERR_NOT_SUPPORTED;
return 0;
}
@ -51,9 +51,10 @@ static int kvm_sbi_ext_ipi_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
unsigned long hmask = cp->a0;
unsigned long hbase = cp->a1;
unsigned long hart_bit = 0, sentmask = 0;
if (cp->a6 != SBI_EXT_IPI_SEND_IPI) {
retdata->err_val = SBI_ERR_INVALID_PARAM;
retdata->err_val = SBI_ERR_NOT_SUPPORTED;
return 0;
}
@ -62,15 +63,23 @@ static int kvm_sbi_ext_ipi_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
if (hbase != -1UL) {
if (tmp->vcpu_id < hbase)
continue;
if (!(hmask & (1UL << (tmp->vcpu_id - hbase))))
hart_bit = tmp->vcpu_id - hbase;
if (hart_bit >= __riscv_xlen)
goto done;
if (!(hmask & (1UL << hart_bit)))
continue;
}
ret = kvm_riscv_vcpu_set_interrupt(tmp, IRQ_VS_SOFT);
if (ret < 0)
break;
sentmask |= 1UL << hart_bit;
kvm_riscv_vcpu_pmu_incr_fw(tmp, SBI_PMU_FW_IPI_RCVD);
}
done:
if (hbase != -1UL && (hmask ^ sentmask))
retdata->err_val = SBI_ERR_INVALID_PARAM;
return ret;
}

View File

@ -4,6 +4,7 @@
*/
#include <linux/kvm_host.h>
#include <linux/wordpart.h>
#include <asm/kvm_vcpu_sbi.h>
#include <asm/sbi.h>
@ -19,7 +20,7 @@ static int kvm_sbi_ext_susp_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
switch (funcid) {
case SBI_EXT_SUSP_SYSTEM_SUSPEND:
if (cp->a0 != SBI_SUSP_SLEEP_TYPE_SUSPEND_TO_RAM) {
if (lower_32_bits(cp->a0) != SBI_SUSP_SLEEP_TYPE_SUSPEND_TO_RAM) {
retdata->err_val = SBI_ERR_INVALID_PARAM;
return 0;
}

View File

@ -293,7 +293,7 @@ int huge_ptep_set_access_flags(struct vm_area_struct *vma,
pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
unsigned long addr,
pte_t *ptep)
pte_t *ptep, unsigned long sz)
{
pte_t orig_pte = ptep_get(ptep);
int pte_num;

View File

@ -86,7 +86,7 @@ static int cmma_test_essa(void)
: [reg1] "=&d" (reg1),
[reg2] "=&a" (reg2),
[rc] "+&d" (rc),
[tmp] "=&d" (tmp),
[tmp] "+&d" (tmp),
"+Q" (get_lowcore()->program_new_psw),
"=Q" (old)
: [psw_old] "a" (&old),

View File

@ -469,6 +469,7 @@ CONFIG_SCSI_DH_ALUA=m
CONFIG_MD=y
CONFIG_BLK_DEV_MD=y
# CONFIG_MD_BITMAP_FILE is not set
CONFIG_MD_LINEAR=m
CONFIG_MD_CLUSTER=m
CONFIG_BCACHE=m
CONFIG_BLK_DEV_DM=y
@ -874,6 +875,7 @@ CONFIG_RCU_CPU_STALL_TIMEOUT=300
CONFIG_LATENCYTOP=y
CONFIG_BOOTTIME_TRACING=y
CONFIG_FUNCTION_GRAPH_RETVAL=y
CONFIG_FUNCTION_GRAPH_RETADDR=y
CONFIG_FPROBE=y
CONFIG_FUNCTION_PROFILER=y
CONFIG_STACK_TRACER=y

View File

@ -459,6 +459,7 @@ CONFIG_SCSI_DH_ALUA=m
CONFIG_MD=y
CONFIG_BLK_DEV_MD=y
# CONFIG_MD_BITMAP_FILE is not set
CONFIG_MD_LINEAR=m
CONFIG_MD_CLUSTER=m
CONFIG_BCACHE=m
CONFIG_BLK_DEV_DM=y
@ -825,6 +826,7 @@ CONFIG_RCU_CPU_STALL_TIMEOUT=60
CONFIG_LATENCYTOP=y
CONFIG_BOOTTIME_TRACING=y
CONFIG_FUNCTION_GRAPH_RETVAL=y
CONFIG_FUNCTION_GRAPH_RETADDR=y
CONFIG_FPROBE=y
CONFIG_FUNCTION_PROFILER=y
CONFIG_STACK_TRACER=y

View File

@ -25,8 +25,16 @@ void __set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
#define __HAVE_ARCH_HUGE_PTEP_GET
pte_t huge_ptep_get(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
pte_t __huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
pte_t *ptep);
#define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR
pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
unsigned long addr, pte_t *ptep,
unsigned long sz)
{
return __huge_ptep_get_and_clear(mm, addr, ptep);
}
static inline void arch_clear_hugetlb_flags(struct folio *folio)
{
@ -48,7 +56,7 @@ static inline void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
static inline pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
unsigned long address, pte_t *ptep)
{
return huge_ptep_get_and_clear(vma->vm_mm, address, ptep);
return __huge_ptep_get_and_clear(vma->vm_mm, address, ptep);
}
#define __HAVE_ARCH_HUGE_PTEP_SET_ACCESS_FLAGS
@ -59,7 +67,7 @@ static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
int changed = !pte_same(huge_ptep_get(vma->vm_mm, addr, ptep), pte);
if (changed) {
huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
__huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
__set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
}
return changed;
@ -69,7 +77,7 @@ static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
unsigned long addr, pte_t *ptep)
{
pte_t pte = huge_ptep_get_and_clear(mm, addr, ptep);
pte_t pte = __huge_ptep_get_and_clear(mm, addr, ptep);
__set_huge_pte_at(mm, addr, ptep, pte_wrprotect(pte));
}

View File

@ -188,8 +188,8 @@ pte_t huge_ptep_get(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
return __rste_to_pte(pte_val(*ptep));
}
pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
unsigned long addr, pte_t *ptep)
pte_t __huge_ptep_get_and_clear(struct mm_struct *mm,
unsigned long addr, pte_t *ptep)
{
pte_t pte = huge_ptep_get(mm, addr, ptep);
pmd_t *pmdp = (pmd_t *) ptep;

View File

@ -8,7 +8,7 @@ PURGATORY_OBJS = $(addprefix $(obj)/,$(purgatory-y))
$(obj)/sha256.o: $(srctree)/lib/crypto/sha256.c FORCE
$(call if_changed_rule,cc_o_c)
CFLAGS_sha256.o := -D__DISABLE_EXPORTS -D__NO_FORTIFY
CFLAGS_sha256.o := -D__NO_FORTIFY
$(obj)/mem.o: $(srctree)/arch/s390/lib/mem.S FORCE
$(call if_changed_rule,as_o_S)
@ -19,9 +19,11 @@ KBUILD_CFLAGS += -fno-zero-initialized-in-bss -fno-builtin -ffreestanding
KBUILD_CFLAGS += -Os -m64 -msoft-float -fno-common
KBUILD_CFLAGS += -fno-stack-protector
KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING
KBUILD_CFLAGS += -D__DISABLE_EXPORTS
KBUILD_CFLAGS += $(CLANG_FLAGS)
KBUILD_CFLAGS += $(call cc-option,-fno-PIE)
KBUILD_AFLAGS := $(filter-out -DCC_USING_EXPOLINE,$(KBUILD_AFLAGS))
KBUILD_AFLAGS += -D__DISABLE_EXPORTS
# Since we link purgatory with -r unresolved symbols are not checked, so we
# also link a purgatory.chk binary without -r to check for unresolved symbols.

View File

@ -20,7 +20,7 @@ void __set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
#define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR
pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
pte_t *ptep);
pte_t *ptep, unsigned long sz);
#define __HAVE_ARCH_HUGE_PTEP_CLEAR_FLUSH
static inline pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,

View File

@ -260,7 +260,7 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
}
pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
pte_t *ptep)
pte_t *ptep, unsigned long sz)
{
unsigned int i, nptes, orig_shift, shift;
unsigned long size;

View File

@ -1341,6 +1341,7 @@ config X86_REBOOTFIXUPS
config MICROCODE
def_bool y
depends on CPU_SUP_AMD || CPU_SUP_INTEL
select CRYPTO_LIB_SHA256 if CPU_SUP_AMD
config MICROCODE_INITRD32
def_bool y

View File

@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
#include "misc.h"
#include <asm/bootparam.h>
#include <asm/bootparam_utils.h>
#include <asm/e820/types.h>
#include <asm/processor.h>
#include "pgtable.h"
@ -107,6 +108,7 @@ asmlinkage void configure_5level_paging(struct boot_params *bp, void *pgtable)
bool l5_required = false;
/* Initialize boot_params. Required for cmdline_find_option_bool(). */
sanitize_boot_params(bp);
boot_params_ptr = bp;
/*

View File

@ -190,6 +190,7 @@ static __always_inline bool int80_is_external(void)
/**
* do_int80_emulation - 32-bit legacy syscall C entry from asm
* @regs: syscall arguments in struct pt_regs on the stack.
*
* This entry point can be used by 32-bit and 64-bit programs to perform
* 32-bit system calls. Instances of INT $0x80 can be found inline in

View File

@ -628,7 +628,7 @@ int x86_pmu_hw_config(struct perf_event *event)
if (event->attr.type == event->pmu->type)
event->hw.config |= x86_pmu_get_event_config(event);
if (event->attr.sample_period && x86_pmu.limit_period) {
if (!event->attr.freq && x86_pmu.limit_period) {
s64 left = event->attr.sample_period;
x86_pmu.limit_period(event, &left);
if (left > event->attr.sample_period)

View File

@ -397,34 +397,28 @@ static struct event_constraint intel_lnc_event_constraints[] = {
METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_FETCH_LAT, 6),
METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_MEM_BOUND, 7),
INTEL_EVENT_CONSTRAINT(0x20, 0xf),
INTEL_UEVENT_CONSTRAINT(0x012a, 0xf),
INTEL_UEVENT_CONSTRAINT(0x012b, 0xf),
INTEL_UEVENT_CONSTRAINT(0x0148, 0x4),
INTEL_UEVENT_CONSTRAINT(0x0175, 0x4),
INTEL_EVENT_CONSTRAINT(0x2e, 0x3ff),
INTEL_EVENT_CONSTRAINT(0x3c, 0x3ff),
/*
* Generally event codes < 0x90 are restricted to counters 0-3.
* The 0x2E and 0x3C are exceptions, which have no restriction.
*/
INTEL_EVENT_CONSTRAINT_RANGE(0x01, 0x8f, 0xf),
INTEL_UEVENT_CONSTRAINT(0x01a3, 0xf),
INTEL_UEVENT_CONSTRAINT(0x02a3, 0xf),
INTEL_UEVENT_CONSTRAINT(0x08a3, 0x4),
INTEL_UEVENT_CONSTRAINT(0x0ca3, 0x4),
INTEL_UEVENT_CONSTRAINT(0x04a4, 0x1),
INTEL_UEVENT_CONSTRAINT(0x08a4, 0x1),
INTEL_UEVENT_CONSTRAINT(0x10a4, 0x1),
INTEL_UEVENT_CONSTRAINT(0x01b1, 0x8),
INTEL_UEVENT_CONSTRAINT(0x01cd, 0x3fc),
INTEL_UEVENT_CONSTRAINT(0x02cd, 0x3),
INTEL_EVENT_CONSTRAINT(0xce, 0x1),
INTEL_EVENT_CONSTRAINT_RANGE(0xd0, 0xdf, 0xf),
/*
* Generally event codes >= 0x90 are likely to have no restrictions.
* The exceptions are defined above.
*/
INTEL_EVENT_CONSTRAINT_RANGE(0x90, 0xfe, 0x3ff),
INTEL_UEVENT_CONSTRAINT(0x00e0, 0xf),
EVENT_CONSTRAINT_END
};
@ -3958,6 +3952,85 @@ static inline bool intel_pmu_has_cap(struct perf_event *event, int idx)
return test_bit(idx, (unsigned long *)&intel_cap->capabilities);
}
static u64 intel_pmu_freq_start_period(struct perf_event *event)
{
int type = event->attr.type;
u64 config, factor;
s64 start;
/*
* 127 is the lowest possible recommended SAV (sample after value)
* for a 4000 Hz freq (the default), according to the event list JSON file.
* Also, assume the workload is idle 50% of the time.
*/
factor = 64 * 4000;
if (type != PERF_TYPE_HARDWARE && type != PERF_TYPE_HW_CACHE)
goto end;
/*
* The estimation of the start period in freq mode is
* based on the assumptions below.
*
* For a cycles or an instructions event, assume 1GHz for the
* underlying platform and an IPC of 1, with the workload idle 50% of the time.
* The start period = 1,000,000,000 * 1 / freq / 2.
* = 500,000,000 / freq
*
* Usually, branch-related events occur less often than the
* instructions event. According to the Intel event list JSON
* file, the SAV (sample after value) of a branch-related event
* is usually 1/4 of that of an instruction event.
* The start period of branch-related events = 125,000,000 / freq.
*
* Cache-related events occur even less often. The SAV is usually
* 1/20 of that of an instruction event.
* The start period of cache-related events = 25,000,000 / freq.
*/
config = event->attr.config & PERF_HW_EVENT_MASK;
if (type == PERF_TYPE_HARDWARE) {
switch (config) {
case PERF_COUNT_HW_CPU_CYCLES:
case PERF_COUNT_HW_INSTRUCTIONS:
case PERF_COUNT_HW_BUS_CYCLES:
case PERF_COUNT_HW_STALLED_CYCLES_FRONTEND:
case PERF_COUNT_HW_STALLED_CYCLES_BACKEND:
case PERF_COUNT_HW_REF_CPU_CYCLES:
factor = 500000000;
break;
case PERF_COUNT_HW_BRANCH_INSTRUCTIONS:
case PERF_COUNT_HW_BRANCH_MISSES:
factor = 125000000;
break;
case PERF_COUNT_HW_CACHE_REFERENCES:
case PERF_COUNT_HW_CACHE_MISSES:
factor = 25000000;
break;
default:
goto end;
}
}
if (type == PERF_TYPE_HW_CACHE)
factor = 25000000;
end:
/*
* Usually, a prime or a number with few factors (close to a prime)
* is chosen as an SAV, which makes it less likely that the sampling
* period synchronizes with some periodic event in the workload.
* Subtract 1 so that, at least for the default freq, values near
* powers of two are avoided.
*/
start = DIV_ROUND_UP_ULL(factor, event->attr.sample_freq) - 1;
if (start > x86_pmu.max_period)
start = x86_pmu.max_period;
if (x86_pmu.limit_period)
x86_pmu.limit_period(event, &start);
return start;
}
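/*
 * Worked example (illustrative): a cycles event sampled at the default
 * 4000 Hz gets factor = 500000000, so the start period becomes
 * DIV_ROUND_UP_ULL(500000000, 4000) - 1 = 125000 - 1 = 124999.
 */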
static int intel_pmu_hw_config(struct perf_event *event)
{
int ret = x86_pmu_hw_config(event);
@ -3969,6 +4042,12 @@ static int intel_pmu_hw_config(struct perf_event *event)
if (ret)
return ret;
if (event->attr.freq && event->attr.sample_freq) {
event->hw.sample_period = intel_pmu_freq_start_period(event);
event->hw.last_period = event->hw.sample_period;
local64_set(&event->hw.period_left, event->hw.sample_period);
}
if (event->attr.precise_ip) {
if ((event->attr.config & INTEL_ARCH_EVENT_MASK) == INTEL_FIXED_VLBR_EVENT)
return -EINVAL;

View File

@ -1199,7 +1199,7 @@ struct event_constraint intel_lnc_pebs_event_constraints[] = {
INTEL_FLAGS_UEVENT_CONSTRAINT(0x100, 0x100000000ULL), /* INST_RETIRED.PREC_DIST */
INTEL_FLAGS_UEVENT_CONSTRAINT(0x0400, 0x800000000ULL),
INTEL_HYBRID_LDLAT_CONSTRAINT(0x1cd, 0x3ff),
INTEL_HYBRID_LDLAT_CONSTRAINT(0x1cd, 0x3fc),
INTEL_HYBRID_STLAT_CONSTRAINT(0x2cd, 0x3),
INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x11d0, 0xf), /* MEM_INST_RETIRED.STLB_MISS_LOADS */
INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x12d0, 0xf), /* MEM_INST_RETIRED.STLB_MISS_STORES */

View File

@ -879,6 +879,7 @@ static const struct x86_cpu_id rapl_model_match[] __initconst = {
X86_MATCH_VFM(INTEL_METEORLAKE_L, &model_skl),
X86_MATCH_VFM(INTEL_ARROWLAKE_H, &model_skl),
X86_MATCH_VFM(INTEL_ARROWLAKE, &model_skl),
X86_MATCH_VFM(INTEL_ARROWLAKE_U, &model_skl),
X86_MATCH_VFM(INTEL_LUNARLAKE_M, &model_skl),
{},
};

View File

@ -198,9 +198,8 @@
.endm
/*
* Equivalent to -mindirect-branch-cs-prefix; emit the 5 byte jmp/call
* to the retpoline thunk with a CS prefix when the register requires
* a RAX prefix byte to encode. Also see apply_retpolines().
* Emits a conditional CS prefix that is compatible with
* -mindirect-branch-cs-prefix.
*/
.macro __CS_PREFIX reg:req
.irp rs,r8,r9,r10,r11,r12,r13,r14,r15
@ -420,20 +419,27 @@ static inline void call_depth_return_thunk(void) {}
#ifdef CONFIG_X86_64
/*
* Emits a conditional CS prefix that is compatible with
* -mindirect-branch-cs-prefix.
*/
#define __CS_PREFIX(reg) \
".irp rs,r8,r9,r10,r11,r12,r13,r14,r15\n" \
".ifc \\rs," reg "\n" \
".byte 0x2e\n" \
".endif\n" \
".endr\n"
/*
* Inline asm uses the %V modifier which is only in newer GCC
* which is ensured when CONFIG_MITIGATION_RETPOLINE is defined.
*/
# define CALL_NOSPEC \
ALTERNATIVE_2( \
ANNOTATE_RETPOLINE_SAFE \
"call *%[thunk_target]\n", \
"call __x86_indirect_thunk_%V[thunk_target]\n", \
X86_FEATURE_RETPOLINE, \
"lfence;\n" \
ANNOTATE_RETPOLINE_SAFE \
"call *%[thunk_target]\n", \
X86_FEATURE_RETPOLINE_LFENCE)
#ifdef CONFIG_MITIGATION_RETPOLINE
#define CALL_NOSPEC __CS_PREFIX("%V[thunk_target]") \
"call __x86_indirect_thunk_%V[thunk_target]\n"
#else
#define CALL_NOSPEC "call *%[thunk_target]\n"
#endif
# define THUNK_TARGET(addr) [thunk_target] "r" (addr)
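/*
 * Hypothetical call site (sketch): the asm template expects the branch
 * target in the [thunk_target] operand, which THUNK_TARGET() supplies.
 * Real call sites also declare outputs and the registers the callee
 * may clobber.
 */
static __always_inline void indirect_call(void (*fn)(void))
{
	asm volatile(CALL_NOSPEC : : THUNK_TARGET(fn) : "memory");
}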

View File

@ -808,7 +808,7 @@ void init_intel_cacheinfo(struct cpuinfo_x86 *c)
cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]);
/* If bit 31 is set, this is an unknown format */
for (j = 0 ; j < 3 ; j++)
for (j = 0 ; j < 4 ; j++)
if (regs[j] & (1 << 31))
regs[j] = 0;

View File

@ -45,6 +45,7 @@ static const struct cpuid_dep cpuid_deps[] = {
{ X86_FEATURE_AES, X86_FEATURE_XMM2 },
{ X86_FEATURE_SHA_NI, X86_FEATURE_XMM2 },
{ X86_FEATURE_GFNI, X86_FEATURE_XMM2 },
{ X86_FEATURE_AVX_VNNI, X86_FEATURE_AVX },
{ X86_FEATURE_FMA, X86_FEATURE_AVX },
{ X86_FEATURE_VAES, X86_FEATURE_AVX },
{ X86_FEATURE_VPCLMULQDQ, X86_FEATURE_AVX },

View File

@ -153,8 +153,8 @@ static void geode_configure(void)
u8 ccr3;
local_irq_save(flags);
/* Suspend on halt power saving and enable #SUSP pin */
setCx86(CX86_CCR2, getCx86(CX86_CCR2) | 0x88);
/* Suspend on halt power saving */
setCx86(CX86_CCR2, getCx86(CX86_CCR2) | 0x08);
ccr3 = getCx86(CX86_CCR3);
setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN */

View File

@ -635,26 +635,37 @@ static unsigned int intel_size_cache(struct cpuinfo_x86 *c, unsigned int size)
}
#endif
#define TLB_INST_4K 0x01
#define TLB_INST_4M 0x02
#define TLB_INST_2M_4M 0x03
#define TLB_INST_4K 0x01
#define TLB_INST_4M 0x02
#define TLB_INST_2M_4M 0x03
#define TLB_INST_ALL 0x05
#define TLB_INST_1G 0x06
#define TLB_INST_ALL 0x05
#define TLB_INST_1G 0x06
#define TLB_DATA_4K 0x11
#define TLB_DATA_4M 0x12
#define TLB_DATA_2M_4M 0x13
#define TLB_DATA_4K_4M 0x14
#define TLB_DATA_4K 0x11
#define TLB_DATA_4M 0x12
#define TLB_DATA_2M_4M 0x13
#define TLB_DATA_4K_4M 0x14
#define TLB_DATA_1G 0x16
#define TLB_DATA_1G 0x16
#define TLB_DATA_1G_2M_4M 0x17
#define TLB_DATA0_4K 0x21
#define TLB_DATA0_4M 0x22
#define TLB_DATA0_2M_4M 0x23
#define TLB_DATA0_4K 0x21
#define TLB_DATA0_4M 0x22
#define TLB_DATA0_2M_4M 0x23
#define STLB_4K 0x41
#define STLB_4K_2M 0x42
#define STLB_4K 0x41
#define STLB_4K_2M 0x42
/*
* All of leaf 0x2's one-byte TLB descriptors imply the same number of
* entries for their respective TLB types. The 0x63 descriptor is an
* exception: it implies 4 dTLB entries for 1GB pages and 32 dTLB entries
* for 2MB or 4MB pages. Encode descriptor 0x63's dTLB entry count for
* 2MB/4MB pages here, as its count for dTLB 1GB pages is already in the
* intel_tlb_table[] mapping.
*/
#define TLB_0x63_2M_4M_ENTRIES 32
static const struct _tlb_table intel_tlb_table[] = {
{ 0x01, TLB_INST_4K, 32, " TLB_INST 4 KByte pages, 4-way set associative" },
@ -676,7 +687,8 @@ static const struct _tlb_table intel_tlb_table[] = {
{ 0x5c, TLB_DATA_4K_4M, 128, " TLB_DATA 4 KByte and 4 MByte pages" },
{ 0x5d, TLB_DATA_4K_4M, 256, " TLB_DATA 4 KByte and 4 MByte pages" },
{ 0x61, TLB_INST_4K, 48, " TLB_INST 4 KByte pages, full associative" },
{ 0x63, TLB_DATA_1G, 4, " TLB_DATA 1 GByte pages, 4-way set associative" },
{ 0x63, TLB_DATA_1G_2M_4M, 4, " TLB_DATA 1 GByte pages, 4-way set associative"
" (plus 32 entries TLB_DATA 2 MByte or 4 MByte pages, not encoded here)" },
{ 0x6b, TLB_DATA_4K, 256, " TLB_DATA 4 KByte pages, 8-way associative" },
{ 0x6c, TLB_DATA_2M_4M, 128, " TLB_DATA 2 MByte or 4 MByte pages, 8-way associative" },
{ 0x6d, TLB_DATA_1G, 16, " TLB_DATA 1 GByte pages, fully associative" },
@ -776,6 +788,12 @@ static void intel_tlb_lookup(const unsigned char desc)
if (tlb_lld_4m[ENTRIES] < intel_tlb_table[k].entries)
tlb_lld_4m[ENTRIES] = intel_tlb_table[k].entries;
break;
case TLB_DATA_1G_2M_4M:
if (tlb_lld_2m[ENTRIES] < TLB_0x63_2M_4M_ENTRIES)
tlb_lld_2m[ENTRIES] = TLB_0x63_2M_4M_ENTRIES;
if (tlb_lld_4m[ENTRIES] < TLB_0x63_2M_4M_ENTRIES)
tlb_lld_4m[ENTRIES] = TLB_0x63_2M_4M_ENTRIES;
fallthrough;
case TLB_DATA_1G:
if (tlb_lld_1g[ENTRIES] < intel_tlb_table[k].entries)
tlb_lld_1g[ENTRIES] = intel_tlb_table[k].entries;
@ -799,7 +817,7 @@ static void intel_detect_tlb(struct cpuinfo_x86 *c)
cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]);
/* If bit 31 is set, this is an unknown format */
for (j = 0 ; j < 3 ; j++)
for (j = 0 ; j < 4 ; j++)
if (regs[j] & (1 << 31))
regs[j] = 0;
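To make the new 0x63 handling concrete: the case updates the 2M and 4M dTLB counts from the hardcoded constant, then falls through so the 1G count still comes from the table entry. A user-space sketch of just that flow (simplified from the kernel's tlb_lld_* bookkeeping):

/* tlb_0x63_demo.c - build with: gcc -O2 tlb_0x63_demo.c */
#include <stdio.h>

#define TLB_0x63_2M_4M_ENTRIES	32

static unsigned int tlb_lld_2m, tlb_lld_4m, tlb_lld_1g;

static void set_max(unsigned int *count, unsigned int entries)
{
	if (*count < entries)
		*count = entries;
}

static void handle_desc_0x63(unsigned int table_entries)
{
	/* 2M/4M counts come from the constant the table cannot encode... */
	set_max(&tlb_lld_2m, TLB_0x63_2M_4M_ENTRIES);
	set_max(&tlb_lld_4m, TLB_0x63_2M_4M_ENTRIES);
	/* ...then "fall through": the 1G count is the table's entry count. */
	set_max(&tlb_lld_1g, table_entries);
}

int main(void)
{
	handle_desc_0x63(4);	/* intel_tlb_table[] lists 4 entries for 0x63 */
	printf("2M: %u  4M: %u  1G: %u\n", tlb_lld_2m, tlb_lld_4m, tlb_lld_1g);
	return 0;
}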

View File

@ -23,14 +23,18 @@
#include <linux/earlycpio.h>
#include <linux/firmware.h>
#include <linux/bsearch.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/initrd.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <crypto/sha2.h>
#include <asm/microcode.h>
#include <asm/processor.h>
#include <asm/cmdline.h>
#include <asm/setup.h>
#include <asm/cpu.h>
#include <asm/msr.h>
@ -145,6 +149,107 @@ ucode_path[] __maybe_unused = "kernel/x86/microcode/AuthenticAMD.bin";
*/
static u32 bsp_cpuid_1_eax __ro_after_init;
static bool sha_check = true;
struct patch_digest {
u32 patch_id;
u8 sha256[SHA256_DIGEST_SIZE];
};
#include "amd_shas.c"
static int cmp_id(const void *key, const void *elem)
{
struct patch_digest *pd = (struct patch_digest *)elem;
u32 patch_id = *(u32 *)key;
if (patch_id == pd->patch_id)
return 0;
else if (patch_id < pd->patch_id)
return -1;
else
return 1;
}
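cmp_id() is a standard bsearch() comparator: the key is a bare patch ID, each element a struct patch_digest, and the table must stay sorted by ID (hence the "Keep 'em sorted" note in amd_shas.c further down). A minimal user-space usage sketch with made-up IDs and zeroed digests:

/* bsearch_demo.c - build with: gcc -O2 bsearch_demo.c */
#include <stdio.h>
#include <stdlib.h>

struct patch_digest {
	unsigned int patch_id;
	unsigned char sha256[32];
};

/* Hypothetical table; must be sorted by patch_id for bsearch(). */
static const struct patch_digest phashes[] = {
	{ 0x8001227, { 0 } },
	{ 0x800126e, { 0 } },
	{ 0xa0011d5, { 0 } },
};

static int cmp_id(const void *key, const void *elem)
{
	const struct patch_digest *pd = elem;
	unsigned int patch_id = *(const unsigned int *)key;

	if (patch_id == pd->patch_id)
		return 0;
	return patch_id < pd->patch_id ? -1 : 1;
}

int main(void)
{
	unsigned int id = 0x800126e;
	const struct patch_digest *pd;

	pd = bsearch(&id, phashes, sizeof(phashes) / sizeof(phashes[0]),
		     sizeof(phashes[0]), cmp_id);
	printf("patch 0x%x: %s\n", id, pd ? "digest found" : "not found");
	return 0;
}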
static bool need_sha_check(u32 cur_rev)
{
switch (cur_rev >> 8) {
case 0x80012: return cur_rev <= 0x800126f; break;
case 0x83010: return cur_rev <= 0x830107c; break;
case 0x86001: return cur_rev <= 0x860010e; break;
case 0x86081: return cur_rev <= 0x8608108; break;
case 0x87010: return cur_rev <= 0x8701034; break;
case 0x8a000: return cur_rev <= 0x8a0000a; break;
case 0xa0011: return cur_rev <= 0xa0011da; break;
case 0xa0012: return cur_rev <= 0xa001243; break;
case 0xa1011: return cur_rev <= 0xa101153; break;
case 0xa1012: return cur_rev <= 0xa10124e; break;
case 0xa1081: return cur_rev <= 0xa108109; break;
case 0xa2010: return cur_rev <= 0xa20102f; break;
case 0xa2012: return cur_rev <= 0xa201212; break;
case 0xa6012: return cur_rev <= 0xa60120a; break;
case 0xa7041: return cur_rev <= 0xa704109; break;
case 0xa7052: return cur_rev <= 0xa705208; break;
case 0xa7080: return cur_rev <= 0xa708009; break;
case 0xa70c0: return cur_rev <= 0xa70c009; break;
case 0xaa002: return cur_rev <= 0xaa00218; break;
default: break;
}
pr_info("You should not be seeing this. Please send the following couple of lines to x86-<at>-kernel.org\n");
pr_info("CPUID(1).EAX: 0x%x, current revision: 0x%x\n", bsp_cpuid_1_eax, cur_rev);
return true;
}
static bool verify_sha256_digest(u32 patch_id, u32 cur_rev, const u8 *data, unsigned int len)
{
struct patch_digest *pd = NULL;
u8 digest[SHA256_DIGEST_SIZE];
struct sha256_state s;
int i;
if (x86_family(bsp_cpuid_1_eax) < 0x17 ||
x86_family(bsp_cpuid_1_eax) > 0x19)
return true;
if (!need_sha_check(cur_rev))
return true;
if (!sha_check)
return true;
pd = bsearch(&patch_id, phashes, ARRAY_SIZE(phashes), sizeof(struct patch_digest), cmp_id);
if (!pd) {
pr_err("No sha256 digest for patch ID: 0x%x found\n", patch_id);
return false;
}
sha256_init(&s);
sha256_update(&s, data, len);
sha256_final(&s, digest);
if (memcmp(digest, pd->sha256, sizeof(digest))) {
pr_err("Patch 0x%x SHA256 digest mismatch!\n", patch_id);
for (i = 0; i < SHA256_DIGEST_SIZE; i++)
pr_cont("0x%x ", digest[i]);
pr_info("\n");
return false;
}
return true;
}
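verify_sha256_digest() builds on the kernel's lib/crypto SHA-256 helpers; the same compute-and-compare check can be sketched in user space with OpenSSL's one-shot SHA256() (an analogue, not the kernel API; the expected digest below is a zeroed placeholder, so the demo deliberately reports a mismatch):

/* sha_verify_demo.c - build with: gcc sha_verify_demo.c -lcrypto */
#include <stdio.h>
#include <string.h>
#include <openssl/sha.h>

/* Return 1 if the blob's SHA-256 digest matches the expected one. */
static int verify_digest(const unsigned char *data, size_t len,
			 const unsigned char expected[SHA256_DIGEST_LENGTH])
{
	unsigned char digest[SHA256_DIGEST_LENGTH];

	SHA256(data, len, digest);
	return memcmp(digest, expected, sizeof(digest)) == 0;
}

int main(void)
{
	const unsigned char blob[] = "not a real microcode patch";
	const unsigned char expected[SHA256_DIGEST_LENGTH] = { 0 };	/* placeholder */

	printf("%s\n", verify_digest(blob, sizeof(blob) - 1, expected) ?
	       "digest OK" : "digest mismatch");
	return 0;
}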
static u32 get_patch_level(void)
{
u32 rev, dummy __always_unused;
native_rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);
return rev;
}
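get_patch_level() wraps a read of MSR_AMD64_PATCH_LEVEL (0x0000008b). With the msr driver loaded, the same value can be read from user space, since the file offset into /dev/cpu/N/msr selects the MSR number (root required):

/* patch_level_demo.c - build with: gcc patch_level_demo.c; run as root */
#include <stdio.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>

#define MSR_AMD64_PATCH_LEVEL	0x0000008b

int main(void)
{
	uint64_t val;
	int fd = open("/dev/cpu/0/msr", O_RDONLY);

	if (fd < 0) {
		perror("open (needs CONFIG_X86_MSR and root)");
		return 1;
	}
	if (pread(fd, &val, sizeof(val), MSR_AMD64_PATCH_LEVEL) != sizeof(val)) {
		perror("pread");
		close(fd);
		return 1;
	}
	printf("microcode patch level: 0x%08x\n", (uint32_t)val);
	close(fd);
	return 0;
}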
static union cpuid_1_eax ucode_rev_to_cpuid(unsigned int val)
{
union zen_patch_rev p;
@ -246,8 +351,7 @@ static bool verify_equivalence_table(const u8 *buf, size_t buf_size)
* On success, @sh_psize returns the patch size according to the section header,
* to the caller.
*/
static bool
__verify_patch_section(const u8 *buf, size_t buf_size, u32 *sh_psize)
static bool __verify_patch_section(const u8 *buf, size_t buf_size, u32 *sh_psize)
{
u32 p_type, p_size;
const u32 *hdr;
@ -484,10 +588,13 @@ static void scan_containers(u8 *ucode, size_t size, struct cont_desc *desc)
}
}
static bool __apply_microcode_amd(struct microcode_amd *mc, unsigned int psize)
static bool __apply_microcode_amd(struct microcode_amd *mc, u32 *cur_rev,
unsigned int psize)
{
unsigned long p_addr = (unsigned long)&mc->hdr.data_code;
u32 rev, dummy;
if (!verify_sha256_digest(mc->hdr.patch_id, *cur_rev, (const u8 *)p_addr, psize))
return false;
native_wrmsrl(MSR_AMD64_PATCH_LOADER, p_addr);
@ -505,47 +612,13 @@ static bool __apply_microcode_amd(struct microcode_amd *mc, unsigned int psize)
}
/* verify patch application was successful */
native_rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);
if (rev != mc->hdr.patch_id)
*cur_rev = get_patch_level();
if (*cur_rev != mc->hdr.patch_id)
return false;
return true;
}
/*
* Early load occurs before we can vmalloc(). So we look for the microcode
* patch container file in initrd, traverse equivalent cpu table, look for a
* matching microcode patch, and update, all in initrd memory in place.
* When vmalloc() is available for use later -- on 64-bit during first AP load,
* and on 32-bit during save_microcode_in_initrd_amd() -- we can call
* load_microcode_amd() to save equivalent cpu table and microcode patches in
* kernel heap memory.
*
* Returns true if container found (sets @desc), false otherwise.
*/
static bool early_apply_microcode(u32 old_rev, void *ucode, size_t size)
{
struct cont_desc desc = { 0 };
struct microcode_amd *mc;
scan_containers(ucode, size, &desc);
mc = desc.mc;
if (!mc)
return false;
/*
* Allow application of the same revision to pick up SMT-specific
* changes even if the revision of the other SMT thread is already
* up-to-date.
*/
if (old_rev > mc->hdr.patch_id)
return false;
return __apply_microcode_amd(mc, desc.psize);
}
static bool get_builtin_microcode(struct cpio_data *cp)
{
char fw_name[36] = "amd-ucode/microcode_amd.bin";
@ -583,14 +656,35 @@ static bool __init find_blobs_in_containers(struct cpio_data *ret)
return found;
}
/*
* Early load occurs before we can vmalloc(). So we look for the microcode
* patch container file in initrd, traverse equivalent cpu table, look for a
* matching microcode patch, and update, all in initrd memory in place.
* When vmalloc() is available for use later -- on 64-bit during first AP load,
* and on 32-bit during save_microcode_in_initrd() -- we can call
* load_microcode_amd() to save equivalent cpu table and microcode patches in
* kernel heap memory.
*/
void __init load_ucode_amd_bsp(struct early_load_data *ed, unsigned int cpuid_1_eax)
{
struct cont_desc desc = { };
struct microcode_amd *mc;
struct cpio_data cp = { };
u32 dummy;
char buf[4];
u32 rev;
if (cmdline_find_option(boot_command_line, "microcode.amd_sha_check", buf, 4)) {
if (!strncmp(buf, "off", 3)) {
sha_check = false;
pr_warn_once("It is a very very bad idea to disable the blobs SHA check!\n");
add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK);
}
}
bsp_cpuid_1_eax = cpuid_1_eax;
native_rdmsr(MSR_AMD64_PATCH_LEVEL, ed->old_rev, dummy);
rev = get_patch_level();
ed->old_rev = rev;
/* Needed in load_microcode_amd() */
ucode_cpu_info[0].cpu_sig.sig = cpuid_1_eax;
@ -598,37 +692,23 @@ void __init load_ucode_amd_bsp(struct early_load_data *ed, unsigned int cpuid_1_
if (!find_blobs_in_containers(&cp))
return;
if (early_apply_microcode(ed->old_rev, cp.data, cp.size))
native_rdmsr(MSR_AMD64_PATCH_LEVEL, ed->new_rev, dummy);
}
static enum ucode_state _load_microcode_amd(u8 family, const u8 *data, size_t size);
static int __init save_microcode_in_initrd(void)
{
unsigned int cpuid_1_eax = native_cpuid_eax(1);
struct cpuinfo_x86 *c = &boot_cpu_data;
struct cont_desc desc = { 0 };
enum ucode_state ret;
struct cpio_data cp;
if (dis_ucode_ldr || c->x86_vendor != X86_VENDOR_AMD || c->x86 < 0x10)
return 0;
if (!find_blobs_in_containers(&cp))
return -EINVAL;
scan_containers(cp.data, cp.size, &desc);
if (!desc.mc)
return -EINVAL;
ret = _load_microcode_amd(x86_family(cpuid_1_eax), desc.data, desc.size);
if (ret > UCODE_UPDATED)
return -EINVAL;
mc = desc.mc;
if (!mc)
return;
return 0;
/*
* Allow application of the same revision to pick up SMT-specific
* changes even if the revision of the other SMT thread is already
* up-to-date.
*/
if (ed->old_rev > mc->hdr.patch_id)
return;
if (__apply_microcode_amd(mc, &rev, desc.psize))
ed->new_rev = rev;
}
early_initcall(save_microcode_in_initrd);
static inline bool patch_cpus_equivalent(struct ucode_patch *p,
struct ucode_patch *n,
@ -729,14 +809,9 @@ static void free_cache(void)
static struct ucode_patch *find_patch(unsigned int cpu)
{
struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
u32 rev, dummy __always_unused;
u16 equiv_id = 0;
/* fetch rev if not populated yet: */
if (!uci->cpu_sig.rev) {
rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);
uci->cpu_sig.rev = rev;
}
uci->cpu_sig.rev = get_patch_level();
if (x86_family(bsp_cpuid_1_eax) < 0x17) {
equiv_id = find_equiv_id(&equiv_table, uci->cpu_sig.sig);
@ -759,22 +834,20 @@ void reload_ucode_amd(unsigned int cpu)
mc = p->data;
rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);
rev = get_patch_level();
if (rev < mc->hdr.patch_id) {
if (__apply_microcode_amd(mc, p->size))
pr_info_once("reload revision: 0x%08x\n", mc->hdr.patch_id);
if (__apply_microcode_amd(mc, &rev, p->size))
pr_info_once("reload revision: 0x%08x\n", rev);
}
}
static int collect_cpu_info_amd(int cpu, struct cpu_signature *csig)
{
struct cpuinfo_x86 *c = &cpu_data(cpu);
struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
struct ucode_patch *p;
csig->sig = cpuid_eax(0x00000001);
csig->rev = c->microcode;
csig->rev = get_patch_level();
/*
* a patch could have been loaded early, set uci->mc so that
@ -815,7 +888,7 @@ static enum ucode_state apply_microcode_amd(int cpu)
goto out;
}
if (!__apply_microcode_amd(mc_amd, p->size)) {
if (!__apply_microcode_amd(mc_amd, &rev, p->size)) {
pr_err("CPU%d: update failed for patch_level=0x%08x\n",
cpu, mc_amd->hdr.patch_id);
return UCODE_ERROR;
@ -937,8 +1010,7 @@ static int verify_and_add_patch(u8 family, u8 *fw, unsigned int leftover,
}
/* Scan the blob in @data and add microcode patches to the cache. */
static enum ucode_state __load_microcode_amd(u8 family, const u8 *data,
size_t size)
static enum ucode_state __load_microcode_amd(u8 family, const u8 *data, size_t size)
{
u8 *fw = (u8 *)data;
size_t offset;
@ -1013,6 +1085,32 @@ static enum ucode_state load_microcode_amd(u8 family, const u8 *data, size_t siz
return ret;
}
static int __init save_microcode_in_initrd(void)
{
unsigned int cpuid_1_eax = native_cpuid_eax(1);
struct cpuinfo_x86 *c = &boot_cpu_data;
struct cont_desc desc = { 0 };
enum ucode_state ret;
struct cpio_data cp;
if (dis_ucode_ldr || c->x86_vendor != X86_VENDOR_AMD || c->x86 < 0x10)
return 0;
if (!find_blobs_in_containers(&cp))
return -EINVAL;
scan_containers(cp.data, cp.size, &desc);
if (!desc.mc)
return -EINVAL;
ret = _load_microcode_amd(x86_family(cpuid_1_eax), desc.data, desc.size);
if (ret > UCODE_UPDATED)
return -EINVAL;
return 0;
}
early_initcall(save_microcode_in_initrd);
/*
* AMD microcode firmware naming convention, up to family 15h they are in
* the legacy file:

View File

@ -0,0 +1,444 @@
/* Keep 'em sorted. */
static const struct patch_digest phashes[] = {
{ 0x8001227, {
0x99,0xc0,0x9b,0x2b,0xcc,0x9f,0x52,0x1b,
0x1a,0x5f,0x1d,0x83,0xa1,0x6c,0xc4,0x46,
0xe2,0x6c,0xda,0x73,0xfb,0x2d,0x23,0xa8,
0x77,0xdc,0x15,0x31,0x33,0x4a,0x46,0x18,
}
},
{ 0x8001250, {
0xc0,0x0b,0x6b,0x19,0xfd,0x5c,0x39,0x60,
0xd5,0xc3,0x57,0x46,0x54,0xe4,0xd1,0xaa,
0xa8,0xf7,0x1f,0xa8,0x6a,0x60,0x3e,0xe3,
0x27,0x39,0x8e,0x53,0x30,0xf8,0x49,0x19,
}
},
{ 0x800126e, {
0xf3,0x8b,0x2b,0xb6,0x34,0xe3,0xc8,0x2c,
0xef,0xec,0x63,0x6d,0xc8,0x76,0x77,0xb3,
0x25,0x5a,0xb7,0x52,0x8c,0x83,0x26,0xe6,
0x4c,0xbe,0xbf,0xe9,0x7d,0x22,0x6a,0x43,
}
},
{ 0x800126f, {
0x2b,0x5a,0xf2,0x9c,0xdd,0xd2,0x7f,0xec,
0xec,0x96,0x09,0x57,0xb0,0x96,0x29,0x8b,
0x2e,0x26,0x91,0xf0,0x49,0x33,0x42,0x18,
0xdd,0x4b,0x65,0x5a,0xd4,0x15,0x3d,0x33,
}
},
{ 0x800820d, {
0x68,0x98,0x83,0xcd,0x22,0x0d,0xdd,0x59,
0x73,0x2c,0x5b,0x37,0x1f,0x84,0x0e,0x67,
0x96,0x43,0x83,0x0c,0x46,0x44,0xab,0x7c,
0x7b,0x65,0x9e,0x57,0xb5,0x90,0x4b,0x0e,
}
},
{ 0x8301025, {
0xe4,0x7d,0xdb,0x1e,0x14,0xb4,0x5e,0x36,
0x8f,0x3e,0x48,0x88,0x3c,0x6d,0x76,0xa1,
0x59,0xc6,0xc0,0x72,0x42,0xdf,0x6c,0x30,
0x6f,0x0b,0x28,0x16,0x61,0xfc,0x79,0x77,
}
},
{ 0x8301055, {
0x81,0x7b,0x99,0x1b,0xae,0x2d,0x4f,0x9a,
0xef,0x13,0xce,0xb5,0x10,0xaf,0x6a,0xea,
0xe5,0xb0,0x64,0x98,0x10,0x68,0x34,0x3b,
0x9d,0x7a,0xd6,0x22,0x77,0x5f,0xb3,0x5b,
}
},
{ 0x8301072, {
0xcf,0x76,0xa7,0x1a,0x49,0xdf,0x2a,0x5e,
0x9e,0x40,0x70,0xe5,0xdd,0x8a,0xa8,0x28,
0x20,0xdc,0x91,0xd8,0x2c,0xa6,0xa0,0xb1,
0x2d,0x22,0x26,0x94,0x4b,0x40,0x85,0x30,
}
},
{ 0x830107a, {
0x2a,0x65,0x8c,0x1a,0x5e,0x07,0x21,0x72,
0xdf,0x90,0xa6,0x51,0x37,0xd3,0x4b,0x34,
0xc4,0xda,0x03,0xe1,0x8a,0x6c,0xfb,0x20,
0x04,0xb2,0x81,0x05,0xd4,0x87,0xf4,0x0a,
}
},
{ 0x830107b, {
0xb3,0x43,0x13,0x63,0x56,0xc1,0x39,0xad,
0x10,0xa6,0x2b,0xcc,0x02,0xe6,0x76,0x2a,
0x1e,0x39,0x58,0x3e,0x23,0x6e,0xa4,0x04,
0x95,0xea,0xf9,0x6d,0xc2,0x8a,0x13,0x19,
}
},
{ 0x830107c, {
0x21,0x64,0xde,0xfb,0x9f,0x68,0x96,0x47,
0x70,0x5c,0xe2,0x8f,0x18,0x52,0x6a,0xac,
0xa4,0xd2,0x2e,0xe0,0xde,0x68,0x66,0xc3,
0xeb,0x1e,0xd3,0x3f,0xbc,0x51,0x1d,0x38,
}
},
{ 0x860010d, {
0x86,0xb6,0x15,0x83,0xbc,0x3b,0x9c,0xe0,
0xb3,0xef,0x1d,0x99,0x84,0x35,0x15,0xf7,
0x7c,0x2a,0xc6,0x42,0xdb,0x73,0x07,0x5c,
0x7d,0xc3,0x02,0xb5,0x43,0x06,0x5e,0xf8,
}
},
{ 0x8608108, {
0x14,0xfe,0x57,0x86,0x49,0xc8,0x68,0xe2,
0x11,0xa3,0xcb,0x6e,0xff,0x6e,0xd5,0x38,
0xfe,0x89,0x1a,0xe0,0x67,0xbf,0xc4,0xcc,
0x1b,0x9f,0x84,0x77,0x2b,0x9f,0xaa,0xbd,
}
},
{ 0x8701034, {
0xc3,0x14,0x09,0xa8,0x9c,0x3f,0x8d,0x83,
0x9b,0x4c,0xa5,0xb7,0x64,0x8b,0x91,0x5d,
0x85,0x6a,0x39,0x26,0x1e,0x14,0x41,0xa8,
0x75,0xea,0xa6,0xf9,0xc9,0xd1,0xea,0x2b,
}
},
{ 0x8a00008, {
0xd7,0x2a,0x93,0xdc,0x05,0x2f,0xa5,0x6e,
0x0c,0x61,0x2c,0x07,0x9f,0x38,0xe9,0x8e,
0xef,0x7d,0x2a,0x05,0x4d,0x56,0xaf,0x72,
0xe7,0x56,0x47,0x6e,0x60,0x27,0xd5,0x8c,
}
},
{ 0x8a0000a, {
0x73,0x31,0x26,0x22,0xd4,0xf9,0xee,0x3c,
0x07,0x06,0xe7,0xb9,0xad,0xd8,0x72,0x44,
0x33,0x31,0xaa,0x7d,0xc3,0x67,0x0e,0xdb,
0x47,0xb5,0xaa,0xbc,0xf5,0xbb,0xd9,0x20,
}
},
{ 0xa00104c, {
0x3c,0x8a,0xfe,0x04,0x62,0xd8,0x6d,0xbe,
0xa7,0x14,0x28,0x64,0x75,0xc0,0xa3,0x76,
0xb7,0x92,0x0b,0x97,0x0a,0x8e,0x9c,0x5b,
0x1b,0xc8,0x9d,0x3a,0x1e,0x81,0x3d,0x3b,
}
},
{ 0xa00104e, {
0xc4,0x35,0x82,0x67,0xd2,0x86,0xe5,0xb2,
0xfd,0x69,0x12,0x38,0xc8,0x77,0xba,0xe0,
0x70,0xf9,0x77,0x89,0x10,0xa6,0x74,0x4e,
0x56,0x58,0x13,0xf5,0x84,0x70,0x28,0x0b,
}
},
{ 0xa001053, {
0x92,0x0e,0xf4,0x69,0x10,0x3b,0xf9,0x9d,
0x31,0x1b,0xa6,0x99,0x08,0x7d,0xd7,0x25,
0x7e,0x1e,0x89,0xba,0x35,0x8d,0xac,0xcb,
0x3a,0xb4,0xdf,0x58,0x12,0xcf,0xc0,0xc3,
}
},
{ 0xa001058, {
0x33,0x7d,0xa9,0xb5,0x4e,0x62,0x13,0x36,
0xef,0x66,0xc9,0xbd,0x0a,0xa6,0x3b,0x19,
0xcb,0xf5,0xc2,0xc3,0x55,0x47,0x20,0xec,
0x1f,0x7b,0xa1,0x44,0x0e,0x8e,0xa4,0xb2,
}
},
{ 0xa001075, {
0x39,0x02,0x82,0xd0,0x7c,0x26,0x43,0xe9,
0x26,0xa3,0xd9,0x96,0xf7,0x30,0x13,0x0a,
0x8a,0x0e,0xac,0xe7,0x1d,0xdc,0xe2,0x0f,
0xcb,0x9e,0x8d,0xbc,0xd2,0xa2,0x44,0xe0,
}
},
{ 0xa001078, {
0x2d,0x67,0xc7,0x35,0xca,0xef,0x2f,0x25,
0x4c,0x45,0x93,0x3f,0x36,0x01,0x8c,0xce,
0xa8,0x5b,0x07,0xd3,0xc1,0x35,0x3c,0x04,
0x20,0xa2,0xfc,0xdc,0xe6,0xce,0x26,0x3e,
}
},
{ 0xa001079, {
0x43,0xe2,0x05,0x9c,0xfd,0xb7,0x5b,0xeb,
0x5b,0xe9,0xeb,0x3b,0x96,0xf4,0xe4,0x93,
0x73,0x45,0x3e,0xac,0x8d,0x3b,0xe4,0xdb,
0x10,0x31,0xc1,0xe4,0xa2,0xd0,0x5a,0x8a,
}
},
{ 0xa00107a, {
0x5f,0x92,0xca,0xff,0xc3,0x59,0x22,0x5f,
0x02,0xa0,0x91,0x3b,0x4a,0x45,0x10,0xfd,
0x19,0xe1,0x8a,0x6d,0x9a,0x92,0xc1,0x3f,
0x75,0x78,0xac,0x78,0x03,0x1d,0xdb,0x18,
}
},
{ 0xa001143, {
0x56,0xca,0xf7,0x43,0x8a,0x4c,0x46,0x80,
0xec,0xde,0xe5,0x9c,0x50,0x84,0x9a,0x42,
0x27,0xe5,0x51,0x84,0x8f,0x19,0xc0,0x8d,
0x0c,0x25,0xb4,0xb0,0x8f,0x10,0xf3,0xf8,
}
},
{ 0xa001144, {
0x42,0xd5,0x9b,0xa7,0xd6,0x15,0x29,0x41,
0x61,0xc4,0x72,0x3f,0xf3,0x06,0x78,0x4b,
0x65,0xf3,0x0e,0xfa,0x9c,0x87,0xde,0x25,
0xbd,0xb3,0x9a,0xf4,0x75,0x13,0x53,0xdc,
}
},
{ 0xa00115d, {
0xd4,0xc4,0x49,0x36,0x89,0x0b,0x47,0xdd,
0xfb,0x2f,0x88,0x3b,0x5f,0xf2,0x8e,0x75,
0xc6,0x6c,0x37,0x5a,0x90,0x25,0x94,0x3e,
0x36,0x9c,0xae,0x02,0x38,0x6c,0xf5,0x05,
}
},
{ 0xa001173, {
0x28,0xbb,0x9b,0xd1,0xa0,0xa0,0x7e,0x3a,
0x59,0x20,0xc0,0xa9,0xb2,0x5c,0xc3,0x35,
0x53,0x89,0xe1,0x4c,0x93,0x2f,0x1d,0xc3,
0xe5,0xf7,0xf3,0xc8,0x9b,0x61,0xaa,0x9e,
}
},
{ 0xa0011a8, {
0x97,0xc6,0x16,0x65,0x99,0xa4,0x85,0x3b,
0xf6,0xce,0xaa,0x49,0x4a,0x3a,0xc5,0xb6,
0x78,0x25,0xbc,0x53,0xaf,0x5d,0xcf,0xf4,
0x23,0x12,0xbb,0xb1,0xbc,0x8a,0x02,0x2e,
}
},
{ 0xa0011ce, {
0xcf,0x1c,0x90,0xa3,0x85,0x0a,0xbf,0x71,
0x94,0x0e,0x80,0x86,0x85,0x4f,0xd7,0x86,
0xae,0x38,0x23,0x28,0x2b,0x35,0x9b,0x4e,
0xfe,0xb8,0xcd,0x3d,0x3d,0x39,0xc9,0x6a,
}
},
{ 0xa0011d1, {
0xdf,0x0e,0xca,0xde,0xf6,0xce,0x5c,0x1e,
0x4c,0xec,0xd7,0x71,0x83,0xcc,0xa8,0x09,
0xc7,0xc5,0xfe,0xb2,0xf7,0x05,0xd2,0xc5,
0x12,0xdd,0xe4,0xf3,0x92,0x1c,0x3d,0xb8,
}
},
{ 0xa0011d3, {
0x91,0xe6,0x10,0xd7,0x57,0xb0,0x95,0x0b,
0x9a,0x24,0xee,0xf7,0xcf,0x56,0xc1,0xa6,
0x4a,0x52,0x7d,0x5f,0x9f,0xdf,0xf6,0x00,
0x65,0xf7,0xea,0xe8,0x2a,0x88,0xe2,0x26,
}
},
{ 0xa0011d5, {
0xed,0x69,0x89,0xf4,0xeb,0x64,0xc2,0x13,
0xe0,0x51,0x1f,0x03,0x26,0x52,0x7d,0xb7,
0x93,0x5d,0x65,0xca,0xb8,0x12,0x1d,0x62,
0x0d,0x5b,0x65,0x34,0x69,0xb2,0x62,0x21,
}
},
{ 0xa001223, {
0xfb,0x32,0x5f,0xc6,0x83,0x4f,0x8c,0xb8,
0xa4,0x05,0xf9,0x71,0x53,0x01,0x16,0xc4,
0x83,0x75,0x94,0xdd,0xeb,0x7e,0xb7,0x15,
0x8e,0x3b,0x50,0x29,0x8a,0x9c,0xcc,0x45,
}
},
{ 0xa001224, {
0x0e,0x0c,0xdf,0xb4,0x89,0xee,0x35,0x25,
0xdd,0x9e,0xdb,0xc0,0x69,0x83,0x0a,0xad,
0x26,0xa9,0xaa,0x9d,0xfc,0x3c,0xea,0xf9,
0x6c,0xdc,0xd5,0x6d,0x8b,0x6e,0x85,0x4a,
}
},
{ 0xa001227, {
0xab,0xc6,0x00,0x69,0x4b,0x50,0x87,0xad,
0x5f,0x0e,0x8b,0xea,0x57,0x38,0xce,0x1d,
0x0f,0x75,0x26,0x02,0xf6,0xd6,0x96,0xe9,
0x87,0xb9,0xd6,0x20,0x27,0x7c,0xd2,0xe0,
}
},
{ 0xa001229, {
0x7f,0x49,0x49,0x48,0x46,0xa5,0x50,0xa6,
0x28,0x89,0x98,0xe2,0x9e,0xb4,0x7f,0x75,
0x33,0xa7,0x04,0x02,0xe4,0x82,0xbf,0xb4,
0xa5,0x3a,0xba,0x24,0x8d,0x31,0x10,0x1d,
}
},
{ 0xa00122e, {
0x56,0x94,0xa9,0x5d,0x06,0x68,0xfe,0xaf,
0xdf,0x7a,0xff,0x2d,0xdf,0x74,0x0f,0x15,
0x66,0xfb,0x00,0xb5,0x51,0x97,0x9b,0xfa,
0xcb,0x79,0x85,0x46,0x25,0xb4,0xd2,0x10,
}
},
{ 0xa001231, {
0x0b,0x46,0xa5,0xfc,0x18,0x15,0xa0,0x9e,
0xa6,0xdc,0xb7,0xff,0x17,0xf7,0x30,0x64,
0xd4,0xda,0x9e,0x1b,0xc3,0xfc,0x02,0x3b,
0xe2,0xc6,0x0e,0x41,0x54,0xb5,0x18,0xdd,
}
},
{ 0xa001234, {
0x88,0x8d,0xed,0xab,0xb5,0xbd,0x4e,0xf7,
0x7f,0xd4,0x0e,0x95,0x34,0x91,0xff,0xcc,
0xfb,0x2a,0xcd,0xf7,0xd5,0xdb,0x4c,0x9b,
0xd6,0x2e,0x73,0x50,0x8f,0x83,0x79,0x1a,
}
},
{ 0xa001236, {
0x3d,0x30,0x00,0xb9,0x71,0xba,0x87,0x78,
0xa8,0x43,0x55,0xc4,0x26,0x59,0xcf,0x9d,
0x93,0xce,0x64,0x0e,0x8b,0x72,0x11,0x8b,
0xa3,0x8f,0x51,0xe9,0xca,0x98,0xaa,0x25,
}
},
{ 0xa001238, {
0x72,0xf7,0x4b,0x0c,0x7d,0x58,0x65,0xcc,
0x00,0xcc,0x57,0x16,0x68,0x16,0xf8,0x2a,
0x1b,0xb3,0x8b,0xe1,0xb6,0x83,0x8c,0x7e,
0xc0,0xcd,0x33,0xf2,0x8d,0xf9,0xef,0x59,
}
},
{ 0xa00820c, {
0xa8,0x0c,0x81,0xc0,0xa6,0x00,0xe7,0xf3,
0x5f,0x65,0xd3,0xb9,0x6f,0xea,0x93,0x63,
0xf1,0x8c,0x88,0x45,0xd7,0x82,0x80,0xd1,
0xe1,0x3b,0x8d,0xb2,0xf8,0x22,0x03,0xe2,
}
},
{ 0xa10113e, {
0x05,0x3c,0x66,0xd7,0xa9,0x5a,0x33,0x10,
0x1b,0xf8,0x9c,0x8f,0xed,0xfc,0xa7,0xa0,
0x15,0xe3,0x3f,0x4b,0x1d,0x0d,0x0a,0xd5,
0xfa,0x90,0xc4,0xed,0x9d,0x90,0xaf,0x53,
}
},
{ 0xa101144, {
0xb3,0x0b,0x26,0x9a,0xf8,0x7c,0x02,0x26,
0x35,0x84,0x53,0xa4,0xd3,0x2c,0x7c,0x09,
0x68,0x7b,0x96,0xb6,0x93,0xef,0xde,0xbc,
0xfd,0x4b,0x15,0xd2,0x81,0xd3,0x51,0x47,
}
},
{ 0xa101148, {
0x20,0xd5,0x6f,0x40,0x4a,0xf6,0x48,0x90,
0xc2,0x93,0x9a,0xc2,0xfd,0xac,0xef,0x4f,
0xfa,0xc0,0x3d,0x92,0x3c,0x6d,0x01,0x08,
0xf1,0x5e,0xb0,0xde,0xb4,0x98,0xae,0xc4,
}
},
{ 0xa10123e, {
0x03,0xb9,0x2c,0x76,0x48,0x93,0xc9,0x18,
0xfb,0x56,0xfd,0xf7,0xe2,0x1d,0xca,0x4d,
0x1d,0x13,0x53,0x63,0xfe,0x42,0x6f,0xfc,
0x19,0x0f,0xf1,0xfc,0xa7,0xdd,0x89,0x1b,
}
},
{ 0xa101244, {
0x71,0x56,0xb5,0x9f,0x21,0xbf,0xb3,0x3c,
0x8c,0xd7,0x36,0xd0,0x34,0x52,0x1b,0xb1,
0x46,0x2f,0x04,0xf0,0x37,0xd8,0x1e,0x72,
0x24,0xa2,0x80,0x84,0x83,0x65,0x84,0xc0,
}
},
{ 0xa101248, {
0xed,0x3b,0x95,0xa6,0x68,0xa7,0x77,0x3e,
0xfc,0x17,0x26,0xe2,0x7b,0xd5,0x56,0x22,
0x2c,0x1d,0xef,0xeb,0x56,0xdd,0xba,0x6e,
0x1b,0x7d,0x64,0x9d,0x4b,0x53,0x13,0x75,
}
},
{ 0xa108108, {
0xed,0xc2,0xec,0xa1,0x15,0xc6,0x65,0xe9,
0xd0,0xef,0x39,0xaa,0x7f,0x55,0x06,0xc6,
0xf5,0xd4,0x3f,0x7b,0x14,0xd5,0x60,0x2c,
0x28,0x1e,0x9c,0x59,0x69,0x99,0x4d,0x16,
}
},
{ 0xa20102d, {
0xf9,0x6e,0xf2,0x32,0xd3,0x0f,0x5f,0x11,
0x59,0xa1,0xfe,0xcc,0xcd,0x9b,0x42,0x89,
0x8b,0x89,0x2f,0xb5,0xbb,0x82,0xef,0x23,
0x8c,0xe9,0x19,0x3e,0xcc,0x3f,0x7b,0xb4,
}
},
{ 0xa201210, {
0xe8,0x6d,0x51,0x6a,0x8e,0x72,0xf3,0xfe,
0x6e,0x16,0xbc,0x62,0x59,0x40,0x17,0xe9,
0x6d,0x3d,0x0e,0x6b,0xa7,0xac,0xe3,0x68,
0xf7,0x55,0xf0,0x13,0xbb,0x22,0xf6,0x41,
}
},
{ 0xa404107, {
0xbb,0x04,0x4e,0x47,0xdd,0x5e,0x26,0x45,
0x1a,0xc9,0x56,0x24,0xa4,0x4c,0x82,0xb0,
0x8b,0x0d,0x9f,0xf9,0x3a,0xdf,0xc6,0x81,
0x13,0xbc,0xc5,0x25,0xe4,0xc5,0xc3,0x99,
}
},
{ 0xa500011, {
0x23,0x3d,0x70,0x7d,0x03,0xc3,0xc4,0xf4,
0x2b,0x82,0xc6,0x05,0xda,0x80,0x0a,0xf1,
0xd7,0x5b,0x65,0x3a,0x7d,0xab,0xdf,0xa2,
0x11,0x5e,0x96,0x7e,0x71,0xe9,0xfc,0x74,
}
},
{ 0xa601209, {
0x66,0x48,0xd4,0x09,0x05,0xcb,0x29,0x32,
0x66,0xb7,0x9a,0x76,0xcd,0x11,0xf3,0x30,
0x15,0x86,0xcc,0x5d,0x97,0x0f,0xc0,0x46,
0xe8,0x73,0xe2,0xd6,0xdb,0xd2,0x77,0x1d,
}
},
{ 0xa704107, {
0xf3,0xc6,0x58,0x26,0xee,0xac,0x3f,0xd6,
0xce,0xa1,0x72,0x47,0x3b,0xba,0x2b,0x93,
0x2a,0xad,0x8e,0x6b,0xea,0x9b,0xb7,0xc2,
0x64,0x39,0x71,0x8c,0xce,0xe7,0x41,0x39,
}
},
{ 0xa705206, {
0x8d,0xc0,0x76,0xbd,0x58,0x9f,0x8f,0xa4,
0x12,0x9d,0x21,0xfb,0x48,0x21,0xbc,0xe7,
0x67,0x6f,0x04,0x18,0xae,0x20,0x87,0x4b,
0x03,0x35,0xe9,0xbe,0xfb,0x06,0xdf,0xfc,
}
},
{ 0xa708007, {
0x6b,0x76,0xcc,0x78,0xc5,0x8a,0xa3,0xe3,
0x32,0x2d,0x79,0xe4,0xc3,0x80,0xdb,0xb2,
0x07,0xaa,0x3a,0xe0,0x57,0x13,0x72,0x80,
0xdf,0x92,0x73,0x84,0x87,0x3c,0x73,0x93,
}
},
{ 0xa70c005, {
0x88,0x5d,0xfb,0x79,0x64,0xd8,0x46,0x3b,
0x4a,0x83,0x8e,0x77,0x7e,0xcf,0xb3,0x0f,
0x1f,0x1f,0xf1,0x97,0xeb,0xfe,0x56,0x55,
0xee,0x49,0xac,0xe1,0x8b,0x13,0xc5,0x13,
}
},
{ 0xaa00116, {
0xe8,0x4c,0x2c,0x88,0xa1,0xac,0x24,0x63,
0x65,0xe5,0xaa,0x2d,0x16,0xa9,0xc3,0xf5,
0xfe,0x1d,0x5e,0x65,0xc7,0xaa,0x92,0x4d,
0x91,0xee,0x76,0xbb,0x4c,0x66,0x78,0xc9,
}
},
{ 0xaa00212, {
0xbd,0x57,0x5d,0x0a,0x0a,0x30,0xc1,0x75,
0x95,0x58,0x5e,0x93,0x02,0x28,0x43,0x71,
0xed,0x42,0x29,0xc8,0xec,0x34,0x2b,0xb2,
0x1a,0x65,0x4b,0xfe,0x07,0x0f,0x34,0xa1,
}
},
{ 0xaa00213, {
0xed,0x58,0xb7,0x76,0x81,0x7f,0xd9,0x3a,
0x1a,0xff,0x8b,0x34,0xb8,0x4a,0x99,0x0f,
0x28,0x49,0x6c,0x56,0x2b,0xdc,0xb7,0xed,
0x96,0xd5,0x9d,0xc1,0x7a,0xd4,0x51,0x9b,
}
},
{ 0xaa00215, {
0x55,0xd3,0x28,0xcb,0x87,0xa9,0x32,0xe9,
0x4e,0x85,0x4b,0x7c,0x6b,0xd5,0x7c,0xd4,
0x1b,0x51,0x71,0x3a,0x0e,0x0b,0xdc,0x9b,
0x68,0x2f,0x46,0xee,0xfe,0xc6,0x6d,0xef,
}
},
};

View File

@ -100,14 +100,12 @@ extern bool force_minrev;
#ifdef CONFIG_CPU_SUP_AMD
void load_ucode_amd_bsp(struct early_load_data *ed, unsigned int family);
void load_ucode_amd_ap(unsigned int family);
int save_microcode_in_initrd_amd(unsigned int family);
void reload_ucode_amd(unsigned int cpu);
struct microcode_ops *init_amd_microcode(void);
void exit_amd_microcode(void);
#else /* CONFIG_CPU_SUP_AMD */
static inline void load_ucode_amd_bsp(struct early_load_data *ed, unsigned int family) { }
static inline void load_ucode_amd_ap(unsigned int family) { }
static inline int save_microcode_in_initrd_amd(unsigned int family) { return -EINVAL; }
static inline void reload_ucode_amd(unsigned int cpu) { }
static inline struct microcode_ops *init_amd_microcode(void) { return NULL; }
static inline void exit_amd_microcode(void) { }

View File

@ -64,6 +64,13 @@ static int sgx_encl_create(struct sgx_encl *encl, struct sgx_secs *secs)
struct file *backing;
long ret;
/*
* ECREATE would detect this too, but checking here also ensures
* that the 'encl_size' calculations below can never overflow.
*/
if (!is_power_of_2(secs->size))
return -EINVAL;
va_page = sgx_encl_grow(encl, true);
if (IS_ERR(va_page))
return PTR_ERR(va_page);
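The kernel's is_power_of_2() from linux/log2.h is the usual n & (n - 1) bit trick; a user-space analogue of the check being added here:

/* pow2_demo.c - build with: gcc -O2 pow2_demo.c */
#include <stdio.h>
#include <stdbool.h>

/* Same test as the kernel's is_power_of_2() in linux/log2.h. */
static bool is_power_of_2(unsigned long long n)
{
	return n != 0 && (n & (n - 1)) == 0;
}

int main(void)
{
	unsigned long long sizes[] = { 0x1000, 0x3000, 1ULL << 63 };

	for (int i = 0; i < 3; i++)
		printf("secs->size 0x%llx -> %s\n", sizes[i],
		       is_power_of_2(sizes[i]) ? "accepted" : "-EINVAL");
	return 0;
}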

View File

@ -2,6 +2,7 @@
/*
* Architecture specific OF callbacks.
*/
#include <linux/acpi.h>
#include <linux/export.h>
#include <linux/io.h>
#include <linux/interrupt.h>
@ -313,6 +314,6 @@ void __init x86_flattree_get_config(void)
if (initial_dtb)
early_memunmap(dt, map_len);
#endif
if (of_have_populated_dt())
if (acpi_disabled && of_have_populated_dt())
x86_init.mpparse.parse_smp_cfg = x86_dtb_parse_smp_config;
}

View File

@ -25,8 +25,10 @@
#include <asm/posted_intr.h>
#include <asm/irq_remapping.h>
#if defined(CONFIG_X86_LOCAL_APIC) || defined(CONFIG_X86_THERMAL_VECTOR)
#define CREATE_TRACE_POINTS
#include <asm/trace/irq_vectors.h>
#endif
DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
EXPORT_PER_CPU_SYMBOL(irq_stat);

View File

@ -7460,7 +7460,7 @@ static bool kvm_nx_huge_page_recovery_worker(void *data)
return true;
}
static void kvm_mmu_start_lpage_recovery(struct once *once)
static int kvm_mmu_start_lpage_recovery(struct once *once)
{
struct kvm_arch *ka = container_of(once, struct kvm_arch, nx_once);
struct kvm *kvm = container_of(ka, struct kvm, arch);
@ -7471,13 +7471,14 @@ static void kvm_mmu_start_lpage_recovery(struct once *once)
kvm_nx_huge_page_recovery_worker_kill,
kvm, "kvm-nx-lpage-recovery");
if (!nx_thread)
return;
if (IS_ERR(nx_thread))
return PTR_ERR(nx_thread);
vhost_task_start(nx_thread);
/* Make the task visible only once it is fully started. */
WRITE_ONCE(kvm->arch.nx_huge_page_recovery_thread, nx_thread);
return 0;
}
int kvm_mmu_post_init_vm(struct kvm *kvm)
@ -7485,10 +7486,7 @@ int kvm_mmu_post_init_vm(struct kvm *kvm)
if (nx_hugepage_mitigation_hard_disabled)
return 0;
call_once(&kvm->arch.nx_once, kvm_mmu_start_lpage_recovery);
if (!kvm->arch.nx_huge_page_recovery_thread)
return -ENOMEM;
return 0;
return call_once(&kvm->arch.nx_once, kvm_mmu_start_lpage_recovery);
}
void kvm_mmu_pre_destroy_vm(struct kvm *kvm)

View File

@ -5084,6 +5084,17 @@ void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 vm_exit_reason,
load_vmcs12_host_state(vcpu, vmcs12);
/*
* Process events if an injectable IRQ or NMI is pending, even
* if the event is blocked (RFLAGS.IF is cleared on VM-Exit).
* If an event became pending while L2 was active, KVM needs to
* either inject the event or request an IRQ/NMI window. SMIs
* don't need to be processed as SMM is mutually exclusive with
* non-root mode. INIT/SIPI don't need to be checked as INIT
* is blocked post-VMXON, and SIPIs are ignored.
*/
if (kvm_cpu_has_injectable_intr(vcpu) || vcpu->arch.nmi_pending)
kvm_make_request(KVM_REQ_EVENT, vcpu);
return;
}

View File

@ -12877,11 +12877,11 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
mutex_unlock(&kvm->slots_lock);
}
kvm_unload_vcpu_mmus(kvm);
kvm_destroy_vcpus(kvm);
kvm_x86_call(vm_destroy)(kvm);
kvm_free_msr_filter(srcu_dereference_check(kvm->arch.msr_filter, &kvm->srcu, 1));
kvm_pic_destroy(kvm);
kvm_ioapic_destroy(kvm);
kvm_destroy_vcpus(kvm);
kvfree(rcu_dereference_check(kvm->arch.apic_map, 1));
kfree(srcu_dereference_check(kvm->arch.pmu_event_filter, &kvm->srcu, 1));
kvm_mmu_uninit_vm(kvm);

View File

@ -77,7 +77,7 @@ struct bio_slab {
struct kmem_cache *slab;
unsigned int slab_ref;
unsigned int slab_size;
char name[8];
char name[12];
};
static DEFINE_MUTEX(bio_slab_lock);
static DEFINE_XARRAY(bio_slabs);
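A quick illustration of the overflow being fixed: bio slab names are formatted as "bio-<size>", and an 8-byte buffer truncates as soon as the size needs more than three digits (the size below is hypothetical):

/* bio_name_demo.c - build with: gcc -O2 bio_name_demo.c */
#include <stdio.h>

int main(void)
{
	char small[8], big[12];
	unsigned int slab_size = 1310720;	/* hypothetical bio slab size */

	snprintf(small, sizeof(small), "bio-%u", slab_size);
	snprintf(big, sizeof(big), "bio-%u", slab_size);
	printf("name[8]:  \"%s\" (truncated)\n", small);	/* "bio-131" */
	printf("name[12]: \"%s\"\n", big);		/* "bio-1310720" */
	return 0;
}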

View File

@ -270,7 +270,7 @@ static bool bvec_split_segs(const struct queue_limits *lim,
const struct bio_vec *bv, unsigned *nsegs, unsigned *bytes,
unsigned max_segs, unsigned max_bytes)
{
unsigned max_len = min(max_bytes, UINT_MAX) - *bytes;
unsigned max_len = max_bytes - *bytes;
unsigned len = min(bv->bv_len, max_len);
unsigned total_len = 0;
unsigned seg_size = 0;
@ -329,7 +329,7 @@ int bio_split_rw_at(struct bio *bio, const struct queue_limits *lim,
if (nsegs < lim->max_segments &&
bytes + bv.bv_len <= max_bytes &&
bv.bv_offset + bv.bv_len <= PAGE_SIZE) {
bv.bv_offset + bv.bv_len <= lim->min_segment_size) {
nsegs++;
bytes += bv.bv_len;
} else {
@ -556,11 +556,14 @@ int __blk_rq_map_sg(struct request_queue *q, struct request *rq,
{
struct req_iterator iter = {
.bio = rq->bio,
.iter = rq->bio->bi_iter,
};
struct phys_vec vec;
int nsegs = 0;
/* the internal flush request may not have bio attached */
if (iter.bio)
iter.iter = iter.bio->bi_iter;
while (blk_map_iter_next(rq, &iter, &vec)) {
*last_sg = blk_next_sg(last_sg, sglist);
sg_set_page(*last_sg, phys_to_page(vec.paddr), vec.len,

View File

@ -246,6 +246,7 @@ int blk_validate_limits(struct queue_limits *lim)
{
unsigned int max_hw_sectors;
unsigned int logical_block_sectors;
unsigned long seg_size;
int err;
/*
@ -303,7 +304,7 @@ int blk_validate_limits(struct queue_limits *lim)
max_hw_sectors = min_not_zero(lim->max_hw_sectors,
lim->max_dev_sectors);
if (lim->max_user_sectors) {
if (lim->max_user_sectors < PAGE_SIZE / SECTOR_SIZE)
if (lim->max_user_sectors < BLK_MIN_SEGMENT_SIZE / SECTOR_SIZE)
return -EINVAL;
lim->max_sectors = min(max_hw_sectors, lim->max_user_sectors);
} else if (lim->io_opt > (BLK_DEF_MAX_SECTORS_CAP << SECTOR_SHIFT)) {
@ -341,7 +342,7 @@ int blk_validate_limits(struct queue_limits *lim)
*/
if (!lim->seg_boundary_mask)
lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
if (WARN_ON_ONCE(lim->seg_boundary_mask < PAGE_SIZE - 1))
if (WARN_ON_ONCE(lim->seg_boundary_mask < BLK_MIN_SEGMENT_SIZE - 1))
return -EINVAL;
/*
@ -362,10 +363,17 @@ int blk_validate_limits(struct queue_limits *lim)
*/
if (!lim->max_segment_size)
lim->max_segment_size = BLK_MAX_SEGMENT_SIZE;
if (WARN_ON_ONCE(lim->max_segment_size < PAGE_SIZE))
if (WARN_ON_ONCE(lim->max_segment_size < BLK_MIN_SEGMENT_SIZE))
return -EINVAL;
}
/* setup min segment size for building new segment in fast path */
if (lim->seg_boundary_mask > lim->max_segment_size - 1)
seg_size = lim->max_segment_size;
else
seg_size = lim->seg_boundary_mask + 1;
lim->min_segment_size = min_t(unsigned int, seg_size, PAGE_SIZE);
/*
* We require drivers to at least do logical block aligned I/O, but
* historically could not check for that due to the separate calls
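The new min_segment_size is whichever of the two per-segment limits is smaller, capped at PAGE_SIZE so the fast path in bio_may_need_split() stays conservative. The derivation in isolation (a sketch with plain integers; the kernel operates on struct queue_limits with min_t()):

/* min_seg_demo.c - build with: gcc -O2 min_seg_demo.c */
#include <stdio.h>

#define PAGE_SIZE 4096UL

static unsigned int min_segment_size(unsigned long seg_boundary_mask,
				     unsigned int max_segment_size)
{
	unsigned long seg_size;

	if (seg_boundary_mask > max_segment_size - 1)
		seg_size = max_segment_size;
	else
		seg_size = seg_boundary_mask + 1;

	return seg_size < PAGE_SIZE ? seg_size : PAGE_SIZE;
}

int main(void)
{
	/* Huge boundary mask: the segment size limit wins, capped to a page. */
	printf("%u\n", min_segment_size(0xffffffffUL, 65536));	/* 4096 */
	/* 2K boundary: the boundary is the effective per-segment limit. */
	printf("%u\n", min_segment_size(0x7ffUL, 65536));	/* 2048 */
	return 0;
}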

View File

@ -410,13 +410,14 @@ static bool disk_insert_zone_wplug(struct gendisk *disk,
}
}
hlist_add_head_rcu(&zwplug->node, &disk->zone_wplugs_hash[idx]);
atomic_inc(&disk->nr_zone_wplugs);
spin_unlock_irqrestore(&disk->zone_wplugs_lock, flags);
return true;
}
static struct blk_zone_wplug *disk_get_zone_wplug(struct gendisk *disk,
sector_t sector)
static struct blk_zone_wplug *disk_get_hashed_zone_wplug(struct gendisk *disk,
sector_t sector)
{
unsigned int zno = disk_zone_no(disk, sector);
unsigned int idx = hash_32(zno, disk->zone_wplugs_hash_bits);
@ -437,6 +438,15 @@ static struct blk_zone_wplug *disk_get_zone_wplug(struct gendisk *disk,
return NULL;
}
static inline struct blk_zone_wplug *disk_get_zone_wplug(struct gendisk *disk,
sector_t sector)
{
if (!atomic_read(&disk->nr_zone_wplugs))
return NULL;
return disk_get_hashed_zone_wplug(disk, sector);
}
static void disk_free_zone_wplug_rcu(struct rcu_head *rcu_head)
{
struct blk_zone_wplug *zwplug =
@ -503,6 +513,7 @@ static void disk_remove_zone_wplug(struct gendisk *disk,
zwplug->flags |= BLK_ZONE_WPLUG_UNHASHED;
spin_lock_irqsave(&disk->zone_wplugs_lock, flags);
hlist_del_init_rcu(&zwplug->node);
atomic_dec(&disk->nr_zone_wplugs);
spin_unlock_irqrestore(&disk->zone_wplugs_lock, flags);
disk_put_zone_wplug(zwplug);
}
@ -593,6 +604,11 @@ static void disk_zone_wplug_abort(struct blk_zone_wplug *zwplug)
{
struct bio *bio;
if (bio_list_empty(&zwplug->bio_list))
return;
pr_warn_ratelimited("%s: zone %u: Aborting plugged BIOs\n",
zwplug->disk->disk_name, zwplug->zone_no);
while ((bio = bio_list_pop(&zwplug->bio_list)))
blk_zone_wplug_bio_io_error(zwplug, bio);
}
@ -1040,6 +1056,47 @@ plug:
return true;
}
static void blk_zone_wplug_handle_native_zone_append(struct bio *bio)
{
struct gendisk *disk = bio->bi_bdev->bd_disk;
struct blk_zone_wplug *zwplug;
unsigned long flags;
/*
* We have native support for zone append operations, so we are not
* going to handle @bio through plugging. However, we may already have a
* zone write plug for the target zone if that zone was previously
* partially written using regular writes. In such a case, we risk leaving
* the plug in the disk hash table if the zone is fully written using
* zone append operations. Avoid this by removing the zone write plug.
*/
zwplug = disk_get_zone_wplug(disk, bio->bi_iter.bi_sector);
if (likely(!zwplug))
return;
spin_lock_irqsave(&zwplug->lock, flags);
/*
* We are about to remove the zone write plug. But if the user
* (mistakenly) has issued regular writes together with native zone
* append, we must abort the writes, as otherwise the plugged BIOs would
* not be executed by the plug BIO work as disk_get_zone_wplug() will
* return NULL after the plug is removed. Aborting the plugged write
* BIOs is consistent with the fact that these writes will most likely
* fail anyway, as there are no ordering guarantees between zone append
* operations and regular write operations.
*/
if (!bio_list_empty(&zwplug->bio_list)) {
pr_warn_ratelimited("%s: zone %u: Invalid mix of zone append and regular writes\n",
disk->disk_name, zwplug->zone_no);
disk_zone_wplug_abort(zwplug);
}
disk_remove_zone_wplug(disk, zwplug);
spin_unlock_irqrestore(&zwplug->lock, flags);
disk_put_zone_wplug(zwplug);
}
/**
* blk_zone_plug_bio - Handle a zone write BIO with zone write plugging
* @bio: The BIO being submitted
@ -1096,8 +1153,10 @@ bool blk_zone_plug_bio(struct bio *bio, unsigned int nr_segs)
*/
switch (bio_op(bio)) {
case REQ_OP_ZONE_APPEND:
if (!bdev_emulates_zone_append(bdev))
if (!bdev_emulates_zone_append(bdev)) {
blk_zone_wplug_handle_native_zone_append(bio);
return false;
}
fallthrough;
case REQ_OP_WRITE:
case REQ_OP_WRITE_ZEROES:
@ -1284,6 +1343,7 @@ static int disk_alloc_zone_resources(struct gendisk *disk,
{
unsigned int i;
atomic_set(&disk->nr_zone_wplugs, 0);
disk->zone_wplugs_hash_bits =
min(ilog2(pool_size) + 1, BLK_ZONE_WPLUG_MAX_HASH_BITS);
@ -1338,6 +1398,7 @@ static void disk_destroy_zone_wplugs_hash_table(struct gendisk *disk)
}
}
WARN_ON_ONCE(atomic_read(&disk->nr_zone_wplugs));
kfree(disk->zone_wplugs_hash);
disk->zone_wplugs_hash = NULL;
disk->zone_wplugs_hash_bits = 0;
@ -1550,11 +1611,12 @@ static int blk_revalidate_seq_zone(struct blk_zone *zone, unsigned int idx,
}
/*
* We need to track the write pointer of all zones that are not
* empty nor full. So make sure we have a zone write plug for
* such zone if the device has a zone write plug hash table.
* If the device needs zone append emulation, we need to track the
* write pointer of all zones that are neither empty nor full. So make sure
* we have a zone write plug for such zone if the device has a zone
* write plug hash table.
*/
if (!disk->zone_wplugs_hash)
if (!queue_emulates_zone_append(disk->queue) || !disk->zone_wplugs_hash)
return 0;
disk_zone_wplug_sync_wp_offset(disk, zone);

View File

@ -14,6 +14,7 @@
struct elevator_type;
#define BLK_DEV_MAX_SECTORS (LLONG_MAX >> 9)
#define BLK_MIN_SEGMENT_SIZE 4096
/* Max future timer expiry for timeouts */
#define BLK_MAX_TIMEOUT (5 * HZ)
@ -358,8 +359,12 @@ struct bio *bio_split_zone_append(struct bio *bio,
static inline bool bio_may_need_split(struct bio *bio,
const struct queue_limits *lim)
{
return lim->chunk_sectors || bio->bi_vcnt != 1 ||
bio->bi_io_vec->bv_len + bio->bi_io_vec->bv_offset > PAGE_SIZE;
if (lim->chunk_sectors)
return true;
if (bio->bi_vcnt != 1)
return true;
return bio->bi_io_vec->bv_len + bio->bi_io_vec->bv_offset >
lim->min_segment_size;
}
/**

View File

@ -682,7 +682,7 @@ static void utf16_le_to_7bit(const __le16 *in, unsigned int size, u8 *out)
out[size] = 0;
while (i < size) {
u8 c = le16_to_cpu(in[i]) & 0xff;
u8 c = le16_to_cpu(in[i]) & 0x7f;
if (c && !isprint(c))
c = '!';
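The mask matters because the kernel's isprint() accepts Latin-1 characters above 0x7f, so masking with 0xff let 8-bit bytes into a string documented as 7-bit. A user-space sketch of the fixed conversion (uint16_t stands in for __le16; assumes a little-endian host, and note that user-space isprint() is locale-dependent unlike the kernel's):

/* utf16_7bit_demo.c - build with: gcc -O2 utf16_7bit_demo.c */
#include <stdio.h>
#include <ctype.h>
#include <stdint.h>

static void utf16_le_to_7bit(const uint16_t *in, unsigned int size, char *out)
{
	for (unsigned int i = 0; i < size; i++) {
		/* & 0x7f guarantees 7-bit output; & 0xff did not */
		char c = in[i] & 0x7f;

		out[i] = (c && !isprint((unsigned char)c)) ? '!' : c;
	}
	out[size] = 0;
}

int main(void)
{
	const uint16_t name[] = { 'E', 'F', 'I', 0x00e9, 0x0007 };
	char buf[6];

	utf16_le_to_7bit(name, 5, buf);
	printf("%s\n", buf);	/* control chars become '!', no 8-bit bytes */
	return 0;
}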

View File

@ -8,6 +8,7 @@
#include <linux/bitfield.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/slab.h>
#include <linux/xarray.h>
#define CREATE_TRACE_POINTS

View File

@ -21,9 +21,15 @@ struct platform_profile_handler {
struct device dev;
int minor;
unsigned long choices[BITS_TO_LONGS(PLATFORM_PROFILE_LAST)];
unsigned long hidden_choices[BITS_TO_LONGS(PLATFORM_PROFILE_LAST)];
const struct platform_profile_ops *ops;
};
struct aggregate_choices_data {
unsigned long aggregate[BITS_TO_LONGS(PLATFORM_PROFILE_LAST)];
int count;
};
static const char * const profile_names[] = {
[PLATFORM_PROFILE_LOW_POWER] = "low-power",
[PLATFORM_PROFILE_COOL] = "cool",
@ -73,7 +79,7 @@ static int _store_class_profile(struct device *dev, void *data)
lockdep_assert_held(&profile_lock);
handler = to_pprof_handler(dev);
if (!test_bit(*bit, handler->choices))
if (!test_bit(*bit, handler->choices) && !test_bit(*bit, handler->hidden_choices))
return -EOPNOTSUPP;
return handler->ops->profile_set(dev, *bit);
@ -239,21 +245,44 @@ static const struct class platform_profile_class = {
/**
* _aggregate_choices - Aggregate the available profile choices
* @dev: The device
* @data: The available profile choices
* @arg: struct aggregate_choices_data
*
* Return: 0 on success, -errno on failure
*/
static int _aggregate_choices(struct device *dev, void *data)
static int _aggregate_choices(struct device *dev, void *arg)
{
unsigned long tmp[BITS_TO_LONGS(PLATFORM_PROFILE_LAST)];
struct aggregate_choices_data *data = arg;
struct platform_profile_handler *handler;
unsigned long *aggregate = data;
lockdep_assert_held(&profile_lock);
handler = to_pprof_handler(dev);
if (test_bit(PLATFORM_PROFILE_LAST, aggregate))
bitmap_copy(aggregate, handler->choices, PLATFORM_PROFILE_LAST);
bitmap_or(tmp, handler->choices, handler->hidden_choices, PLATFORM_PROFILE_LAST);
if (test_bit(PLATFORM_PROFILE_LAST, data->aggregate))
bitmap_copy(data->aggregate, tmp, PLATFORM_PROFILE_LAST);
else
bitmap_and(aggregate, handler->choices, aggregate, PLATFORM_PROFILE_LAST);
bitmap_and(data->aggregate, tmp, data->aggregate, PLATFORM_PROFILE_LAST);
data->count++;
return 0;
}
/**
* _remove_hidden_choices - Remove hidden choices from aggregate data
* @dev: The device
* @arg: struct aggregate_choices_data
*
* Return: 0 on success, -errno on failure
*/
static int _remove_hidden_choices(struct device *dev, void *arg)
{
struct aggregate_choices_data *data = arg;
struct platform_profile_handler *handler;
lockdep_assert_held(&profile_lock);
handler = to_pprof_handler(dev);
bitmap_andnot(data->aggregate, handler->choices,
handler->hidden_choices, PLATFORM_PROFILE_LAST);
return 0;
}
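The aggregation flow in miniature: start from an all-ones bitmap, AND in each handler's visible-plus-hidden choices while counting handlers, and only when exactly one handler is registered strip its hidden choices from what sysfs advertises. A plain-bitmask sketch (profile numbering and values are hypothetical):

/* profile_agg_demo.c - build with: gcc -O2 profile_agg_demo.c */
#include <stdio.h>

#define NPROF 8u	/* hypothetical number of profile options */

struct handler {
	unsigned int choices;
	unsigned int hidden_choices;
};

static unsigned int aggregate_choices(const struct handler *h, int count)
{
	unsigned int agg = (1u << NPROF) - 1;	/* start from all-ones */

	for (int i = 0; i < count; i++)
		agg &= h[i].choices | h[i].hidden_choices;

	/* A lone handler must not advertise its hidden choices. */
	if (count == 1)
		agg &= h[0].choices & ~h[0].hidden_choices;

	return agg;
}

int main(void)
{
	const struct handler hs[] = {
		{ .choices = 0x0b, .hidden_choices = 0x04 },
		{ .choices = 0x0f, .hidden_choices = 0x00 },
	};

	printf("two handlers: 0x%x\n", aggregate_choices(hs, 2));	/* 0xf */
	printf("one handler:  0x%x\n", aggregate_choices(hs, 1));	/* 0xb */
	return 0;
}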
@ -270,22 +299,31 @@ static ssize_t platform_profile_choices_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
unsigned long aggregate[BITS_TO_LONGS(PLATFORM_PROFILE_LAST)];
struct aggregate_choices_data data = {
.aggregate = { [0 ... BITS_TO_LONGS(PLATFORM_PROFILE_LAST) - 1] = ~0UL },
.count = 0,
};
int err;
set_bit(PLATFORM_PROFILE_LAST, aggregate);
set_bit(PLATFORM_PROFILE_LAST, data.aggregate);
scoped_cond_guard(mutex_intr, return -ERESTARTSYS, &profile_lock) {
err = class_for_each_device(&platform_profile_class, NULL,
aggregate, _aggregate_choices);
&data, _aggregate_choices);
if (err)
return err;
if (data.count == 1) {
err = class_for_each_device(&platform_profile_class, NULL,
&data, _remove_hidden_choices);
if (err)
return err;
}
}
/* no profile handler registered any more */
if (bitmap_empty(aggregate, PLATFORM_PROFILE_LAST))
if (bitmap_empty(data.aggregate, PLATFORM_PROFILE_LAST))
return -EINVAL;
return _commmon_choices_show(aggregate, buf);
return _commmon_choices_show(data.aggregate, buf);
}
/**
@ -373,7 +411,10 @@ static ssize_t platform_profile_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
unsigned long choices[BITS_TO_LONGS(PLATFORM_PROFILE_LAST)];
struct aggregate_choices_data data = {
.aggregate = { [0 ... BITS_TO_LONGS(PLATFORM_PROFILE_LAST) - 1] = ~0UL },
.count = 0,
};
int ret;
int i;
@ -381,13 +422,13 @@ static ssize_t platform_profile_store(struct device *dev,
i = sysfs_match_string(profile_names, buf);
if (i < 0 || i == PLATFORM_PROFILE_CUSTOM)
return -EINVAL;
set_bit(PLATFORM_PROFILE_LAST, choices);
set_bit(PLATFORM_PROFILE_LAST, data.aggregate);
scoped_cond_guard(mutex_intr, return -ERESTARTSYS, &profile_lock) {
ret = class_for_each_device(&platform_profile_class, NULL,
choices, _aggregate_choices);
&data, _aggregate_choices);
if (ret)
return ret;
if (!test_bit(i, choices))
if (!test_bit(i, data.aggregate))
return -EOPNOTSUPP;
ret = class_for_each_device(&platform_profile_class, NULL, &i,
@ -417,8 +458,14 @@ static int profile_class_registered(struct device *dev, const void *data)
static umode_t profile_class_is_visible(struct kobject *kobj, struct attribute *attr, int idx)
{
if (!class_find_device(&platform_profile_class, NULL, NULL, profile_class_registered))
struct device *dev;
dev = class_find_device(&platform_profile_class, NULL, NULL, profile_class_registered);
if (!dev)
return 0;
put_device(dev);
return attr->mode;
}
@ -447,12 +494,15 @@ EXPORT_SYMBOL_GPL(platform_profile_notify);
*/
int platform_profile_cycle(void)
{
struct aggregate_choices_data data = {
.aggregate = { [0 ... BITS_TO_LONGS(PLATFORM_PROFILE_LAST) - 1] = ~0UL },
.count = 0,
};
enum platform_profile_option next = PLATFORM_PROFILE_LAST;
enum platform_profile_option profile = PLATFORM_PROFILE_LAST;
unsigned long choices[BITS_TO_LONGS(PLATFORM_PROFILE_LAST)];
int err;
set_bit(PLATFORM_PROFILE_LAST, choices);
set_bit(PLATFORM_PROFILE_LAST, data.aggregate);
scoped_cond_guard(mutex_intr, return -ERESTARTSYS, &profile_lock) {
err = class_for_each_device(&platform_profile_class, NULL,
&profile, _aggregate_profiles);
@ -464,14 +514,14 @@ int platform_profile_cycle(void)
return -EINVAL;
err = class_for_each_device(&platform_profile_class, NULL,
choices, _aggregate_choices);
&data, _aggregate_choices);
if (err)
return err;
/* never iterate into a custom if all drivers supported it */
clear_bit(PLATFORM_PROFILE_CUSTOM, choices);
clear_bit(PLATFORM_PROFILE_CUSTOM, data.aggregate);
next = find_next_bit_wrap(choices,
next = find_next_bit_wrap(data.aggregate,
PLATFORM_PROFILE_LAST,
profile + 1);
@ -526,6 +576,14 @@ struct device *platform_profile_register(struct device *dev, const char *name,
return ERR_PTR(-EINVAL);
}
if (ops->hidden_choices) {
err = ops->hidden_choices(drvdata, pprof->hidden_choices);
if (err) {
dev_err(dev, "platform_profile hidden_choices failed\n");
return ERR_PTR(err);
}
}
guard(mutex)(&profile_lock);
/* create class interface for individual handler */

View File

@ -386,8 +386,12 @@ struct ahci_host_priv {
static inline bool ahci_ignore_port(struct ahci_host_priv *hpriv,
unsigned int portid)
{
return portid >= hpriv->nports ||
!(hpriv->mask_port_map & (1 << portid));
if (portid >= hpriv->nports)
return true;
/* mask_port_map not set means that all ports are available */
if (!hpriv->mask_port_map)
return false;
return !(hpriv->mask_port_map & (1 << portid));
}
extern int ahci_ignore_sss;
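The rewritten helper makes the zero-mask case explicit. Its decision table, as a standalone sketch:

/* ignore_port_demo.c - build with: gcc -O2 ignore_port_demo.c */
#include <stdio.h>
#include <stdbool.h>

static bool ignore_port(unsigned int nports, unsigned long mask_port_map,
			unsigned int portid)
{
	if (portid >= nports)
		return true;
	/* mask_port_map not set means that all ports are available */
	if (!mask_port_map)
		return false;
	return !(mask_port_map & (1UL << portid));
}

int main(void)
{
	printf("%d\n", ignore_port(4, 0x0, 2));	/* 0: no mask, port in range */
	printf("%d\n", ignore_port(4, 0x5, 1));	/* 1: masked out */
	printf("%d\n", ignore_port(4, 0x5, 5));	/* 1: out of range */
	return 0;
}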

View File

@ -541,6 +541,7 @@ void ahci_save_initial_config(struct device *dev, struct ahci_host_priv *hpriv)
hpriv->saved_port_map = port_map;
}
/* mask_port_map not set means that all ports are available */
if (hpriv->mask_port_map) {
dev_warn(dev, "masking port_map 0x%lx -> 0x%lx\n",
port_map,

View File

@ -651,8 +651,6 @@ struct ahci_host_priv *ahci_platform_get_resources(struct platform_device *pdev,
* If no sub-node was found, keep this for device tree
* compatibility
*/
hpriv->mask_port_map |= BIT(0);
rc = ahci_platform_get_phy(hpriv, 0, dev, dev->of_node);
if (rc)
goto err_out;

View File

@ -4143,10 +4143,6 @@ static const struct ata_dev_quirks_entry __ata_dev_quirks[] = {
{ "Samsung SSD 860*", NULL, ATA_QUIRK_NO_NCQ_TRIM |
ATA_QUIRK_ZERO_AFTER_TRIM |
ATA_QUIRK_NO_NCQ_ON_ATI },
{ "Samsung SSD 870 QVO*", NULL, ATA_QUIRK_NO_NCQ_TRIM |
ATA_QUIRK_ZERO_AFTER_TRIM |
ATA_QUIRK_NO_NCQ_ON_ATI |
ATA_QUIRK_NOLPM },
{ "Samsung SSD 870*", NULL, ATA_QUIRK_NO_NCQ_TRIM |
ATA_QUIRK_ZERO_AFTER_TRIM |
ATA_QUIRK_NO_NCQ_ON_ATI },

View File

@ -2715,9 +2715,12 @@ static int ublk_ctrl_set_params(struct ublk_device *ub,
if (ph.len > sizeof(struct ublk_params))
ph.len = sizeof(struct ublk_params);
/* parameters can only be changed when device isn't live */
mutex_lock(&ub->mutex);
if (ub->dev_info.state == UBLK_S_DEV_LIVE) {
if (test_bit(UB_STATE_USED, &ub->state)) {
/*
* Parameters can only be changed when device hasn't
* been started yet
*/
ret = -EACCES;
} else if (copy_from_user(&ub->params, argp, ph.len)) {
ret = -EFAULT;

View File

@ -2102,7 +2102,8 @@ static int btusb_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
return submit_or_queue_tx_urb(hdev, urb);
case HCI_SCODATA_PKT:
if (hci_conn_num(hdev, SCO_LINK) < 1)
if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
hci_conn_num(hdev, SCO_LINK) < 1)
return -ENODEV;
urb = alloc_isoc_urb(hdev, skb);
@ -2576,7 +2577,8 @@ static int btusb_send_frame_intel(struct hci_dev *hdev, struct sk_buff *skb)
return submit_or_queue_tx_urb(hdev, urb);
case HCI_SCODATA_PKT:
if (hci_conn_num(hdev, SCO_LINK) < 1)
if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
hci_conn_num(hdev, SCO_LINK) < 1)
return -ENODEV;
urb = alloc_isoc_urb(hdev, skb);
@ -3642,6 +3644,7 @@ static ssize_t force_poll_sync_write(struct file *file,
}
static const struct file_operations force_poll_sync_fops = {
.owner = THIS_MODULE,
.open = simple_open,
.read = force_poll_sync_read,
.write = force_poll_sync_write,

View File

@ -923,14 +923,14 @@ static ssize_t port_fops_splice_write(struct pipe_inode_info *pipe,
pipe_lock(pipe);
ret = 0;
if (pipe_empty(pipe->head, pipe->tail))
if (pipe_is_empty(pipe))
goto error_out;
ret = wait_port_writable(port, filp->f_flags & O_NONBLOCK);
if (ret < 0)
goto error_out;
occupancy = pipe_occupancy(pipe->head, pipe->tail);
occupancy = pipe_buf_usage(pipe);
buf = alloc_buf(port->portdev->vdev, 0, occupancy);
if (!buf) {

View File

@ -114,6 +114,18 @@ static int jcore_pit_local_init(unsigned cpu)
pit->periodic_delta = DIV_ROUND_CLOSEST(NSEC_PER_SEC, HZ * buspd);
clockevents_config_and_register(&pit->ced, freq, 1, ULONG_MAX);
enable_percpu_irq(pit->ced.irq, IRQ_TYPE_NONE);
return 0;
}
static int jcore_pit_local_teardown(unsigned cpu)
{
struct jcore_pit *pit = this_cpu_ptr(jcore_pit_percpu);
pr_info("Local J-Core PIT teardown on cpu %u\n", cpu);
disable_percpu_irq(pit->ced.irq);
return 0;
}
@ -168,6 +180,7 @@ static int __init jcore_pit_init(struct device_node *node)
return -ENOMEM;
}
irq_set_percpu_devid(pit_irq);
err = request_percpu_irq(pit_irq, jcore_timer_interrupt,
"jcore_pit", jcore_pit_percpu);
if (err) {
@ -237,7 +250,7 @@ static int __init jcore_pit_init(struct device_node *node)
cpuhp_setup_state(CPUHP_AP_JCORE_TIMER_STARTING,
"clockevents/jcore:starting",
jcore_pit_local_init, NULL);
jcore_pit_local_init, jcore_pit_local_teardown);
return 0;
}

View File

@ -59,9 +59,6 @@ struct bam_desc_hw {
#define DESC_FLAG_NWD BIT(12)
#define DESC_FLAG_CMD BIT(11)
#define BAM_NDP_REVISION_START 0x20
#define BAM_NDP_REVISION_END 0x27
struct bam_async_desc {
struct virt_dma_desc vd;
@ -401,7 +398,6 @@ struct bam_device {
/* dma start transaction tasklet */
struct tasklet_struct task;
u32 bam_revision;
};
/**
@ -445,10 +441,8 @@ static void bam_reset(struct bam_device *bdev)
writel_relaxed(val, bam_addr(bdev, 0, BAM_CTRL));
/* set descriptor threshold, start with 4 bytes */
if (in_range(bdev->bam_revision, BAM_NDP_REVISION_START,
BAM_NDP_REVISION_END))
writel_relaxed(DEFAULT_CNT_THRSHLD,
bam_addr(bdev, 0, BAM_DESC_CNT_TRSHLD));
writel_relaxed(DEFAULT_CNT_THRSHLD,
bam_addr(bdev, 0, BAM_DESC_CNT_TRSHLD));
/* Enable default set of h/w workarounds, ie all except BAM_FULL_PIPE */
writel_relaxed(BAM_CNFG_BITS_DEFAULT, bam_addr(bdev, 0, BAM_CNFG_BITS));
@ -1006,10 +1000,9 @@ static void bam_apply_new_config(struct bam_chan *bchan,
maxburst = bchan->slave.src_maxburst;
else
maxburst = bchan->slave.dst_maxburst;
if (in_range(bdev->bam_revision, BAM_NDP_REVISION_START,
BAM_NDP_REVISION_END))
writel_relaxed(maxburst,
bam_addr(bdev, 0, BAM_DESC_CNT_TRSHLD));
writel_relaxed(maxburst,
bam_addr(bdev, 0, BAM_DESC_CNT_TRSHLD));
}
bchan->reconfigure = 0;
@ -1199,11 +1192,10 @@ static int bam_init(struct bam_device *bdev)
u32 val;
/* read revision and configuration information */
val = readl_relaxed(bam_addr(bdev, 0, BAM_REVISION));
if (!bdev->num_ees)
if (!bdev->num_ees) {
val = readl_relaxed(bam_addr(bdev, 0, BAM_REVISION));
bdev->num_ees = (val >> NUM_EES_SHIFT) & NUM_EES_MASK;
bdev->bam_revision = val & REVISION_MASK;
}
/* check that configured EE is within range */
if (bdev->ee >= bdev->num_ees)

Some files were not shown because too many files have changed in this diff