mirror of git://git.yoctoproject.org/linux-yocto.git
synced 2025-07-17 02:59:58 +02:00
This is the 4.14.259 stable release
-----BEGIN PGP SIGNATURE-----

iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAmHC30MACgkQONu9yGCS
aT5xhBAA0wniOswRLyYHKAbYeaMExVDEFt/ogJRXBSbG0Npzsi7gYMQtkWgiyJMv
WMPRQ6EQARYDAWiOUOVf1PKJJn+8zY0M/yxkuVZwp0GKyqwlAJ9MgVJL8hlaAE32
xJuM+e1X22eGLrwUA6ZcIUI0sWEVU7fyon4KS27Gtew1wD3ehbWdief28gW9Jqdq
ewSdo/SyJVA6km6nuWdGbFPVrYaifOSyBBO2g2r5GZOogMx7lF8rq4IpLIZI7Mi5
72Aw3TjhphA4aeNBbgWk1nFOng3SoaaAeBSX66ZRNesZDc7egUmMY06/uRfpq9bv
b4lYIe392zrPxuMC6LnPcGjv6TjoNl29dZLjRVfM9UsEcbYaFNRsArnAKaLf/lrn
va9hCZQ6d3pNej625aCHT7hMXNGSdP5lcsMA8rM+T84rY0W0vK/kFyFZgXJ6hgA5
xbsIVxo4IFgq9AhyT4r33SVZEf/QEbvdoOC8eRenl0xG4NZObxUGrKXBBTFJYiFm
jhNeViIhKNXRY/u+nqzLuvkiBBVrRIB2T3yuWqKYb4KkrWMXeeoUISClRiIXKyrg
5HmsYERDBEGrQIfepWp/VCnV0d4NYngUdz7qdMzUpe4uDex2RWljI0MJYvkD2Jyj
c5T+OR/8TjU9TUmg1I6zEUBIbrP4q1XgWhYXjoz/Kb85CsFe77A=
=q/P/
-----END PGP SIGNATURE-----

Merge tag 'v4.14.259' into v4.14/standard/base

This is the 4.14.259 stable release

# gpg: Signature made Wed 22 Dec 2021 03:18:11 AM EST
# gpg:                using RSA key 647F28654894E3BD457199BE38DBBDC86092693E
# gpg: Can't check signature: No public key
This commit is contained in:
commit 2a8a6e73f8
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 4
 PATCHLEVEL = 14
-SUBLEVEL = 258
+SUBLEVEL = 259
 EXTRAVERSION =
 NAME = Petit Gorille
@@ -980,4 +980,7 @@ config HAVE_ARCH_COMPILER_H
           linux/compiler-*.h in order to override macro definitions that those
           headers generally provide.
 
+config ARCH_USE_MEMREMAP_PROT
+        bool
+
 source "kernel/gcov/Kconfig"
@@ -16,30 +16,42 @@ config ARM_PTDUMP
           kernel.
           If in doubt, say "N"
 
 # RMK wants arm kernels compiled with frame pointers or stack unwinding.
 # If you know what you are doing and are willing to live without stack
 # traces, you can get a slightly smaller kernel by setting this option to
 # n, but then RMK will have to kill you ;).
-config FRAME_POINTER
-        bool
-        depends on !THUMB2_KERNEL
-        default y if !ARM_UNWIND || FUNCTION_GRAPH_TRACER
+choice
+        prompt "Choose kernel unwinder"
+        default UNWINDER_ARM if AEABI && !FUNCTION_GRAPH_TRACER
+        default UNWINDER_FRAME_POINTER if !AEABI || FUNCTION_GRAPH_TRACER
         help
-          If you say N here, the resulting kernel will be slightly smaller and
-          faster. However, if neither FRAME_POINTER nor ARM_UNWIND are enabled,
-          when a problem occurs with the kernel, the information that is
-          reported is severely limited.
+          This determines which method will be used for unwinding kernel stack
+          traces for panics, oopses, bugs, warnings, perf, /proc/<pid>/stack,
+          livepatch, lockdep, and more.
 
-config ARM_UNWIND
-        bool "Enable stack unwinding support (EXPERIMENTAL)"
+config UNWINDER_FRAME_POINTER
+        bool "Frame pointer unwinder"
+        depends on !THUMB2_KERNEL && !CC_IS_CLANG
+        select ARCH_WANT_FRAME_POINTERS
+        select FRAME_POINTER
+        help
+          This option enables the frame pointer unwinder for unwinding
+          kernel stack traces.
+
+config UNWINDER_ARM
+        bool "ARM EABI stack unwinder"
         depends on AEABI
         default y
+        select ARM_UNWIND
         help
           This option enables stack unwinding support in the kernel
           using the information automatically generated by the
           compiler. The resulting kernel image is slightly bigger but
           the performance is not affected. Currently, this feature
-          only works with EABI compilers. If unsure say Y.
+          only works with EABI compilers.
+
+endchoice
+
+config ARM_UNWIND
+        bool
+
+config FRAME_POINTER
+        bool
 
 config OLD_MCOUNT
         bool
@@ -51,6 +51,6 @@
 #define MX6ULL_PAD_CSI_DATA04__ESAI_TX_FS    0x01F4 0x0480 0x0000 0x9 0x0
 #define MX6ULL_PAD_CSI_DATA05__ESAI_TX_CLK   0x01F8 0x0484 0x0000 0x9 0x0
 #define MX6ULL_PAD_CSI_DATA06__ESAI_TX5_RX0  0x01FC 0x0488 0x0000 0x9 0x0
-#define MX6ULL_PAD_CSI_DATA07__ESAI_T0       0x0200 0x048C 0x0000 0x9 0x0
+#define MX6ULL_PAD_CSI_DATA07__ESAI_TX0      0x0200 0x048C 0x0000 0x9 0x0
 
 #endif /* __DTS_IMX6ULL_PINFUNC_H */
@@ -23,7 +23,7 @@
         flash0: n25q00@0 {
                 #address-cells = <1>;
                 #size-cells = <1>;
-                compatible = "n25q00aa";
+                compatible = "micron,mt25qu02g", "jedec,spi-nor";
                 reg = <0>;
                 spi-max-frequency = <100000000>;
@@ -131,7 +131,7 @@
         flash: flash@0 {
                 #address-cells = <1>;
                 #size-cells = <1>;
-                compatible = "n25q256a";
+                compatible = "micron,n25q256a", "jedec,spi-nor";
                 reg = <0>;
                 spi-max-frequency = <100000000>;
@@ -136,7 +136,7 @@
         flash0: n25q00@0 {
                 #address-cells = <1>;
                 #size-cells = <1>;
-                compatible = "n25q00";
+                compatible = "micron,mt25qu02g", "jedec,spi-nor";
                 reg = <0>;      /* chip select */
                 spi-max-frequency = <100000000>;
@@ -181,7 +181,7 @@
         flash: flash@0 {
                 #address-cells = <1>;
                 #size-cells = <1>;
-                compatible = "n25q00";
+                compatible = "micron,mt25qu02g", "jedec,spi-nor";
                 reg = <0>;
                 spi-max-frequency = <100000000>;
@@ -87,7 +87,7 @@
         flash: flash@0 {
                 #address-cells = <1>;
                 #size-cells = <1>;
-                compatible = "n25q256a";
+                compatible = "micron,n25q256a", "jedec,spi-nor";
                 reg = <0>;
                 spi-max-frequency = <100000000>;
                 m25p,fast-read;
@@ -128,7 +128,7 @@
         flash0: n25q512a@0 {
                 #address-cells = <1>;
                 #size-cells = <1>;
-                compatible = "n25q512a";
+                compatible = "micron,n25q512a", "jedec,spi-nor";
                 reg = <0>;
                 spi-max-frequency = <100000000>;
@@ -249,7 +249,7 @@
         n25q128@0 {
                 #address-cells = <1>;
                 #size-cells = <1>;
-                compatible = "n25q128";
+                compatible = "micron,n25q128", "jedec,spi-nor";
                 reg = <0>;      /* chip select */
                 spi-max-frequency = <100000000>;
                 m25p,fast-read;
@@ -266,7 +266,7 @@
         n25q00@1 {
                 #address-cells = <1>;
                 #size-cells = <1>;
-                compatible = "n25q00";
+                compatible = "micron,mt25qu02g", "jedec,spi-nor";
                 reg = <1>;      /* chip select */
                 spi-max-frequency = <100000000>;
                 m25p,fast-read;
@@ -17,26 +17,25 @@
 /*
  * Faraday optimised copy_user_page
  */
-static void __naked
-fa_copy_user_page(void *kto, const void *kfrom)
+static void fa_copy_user_page(void *kto, const void *kfrom)
 {
-        asm("\
-        stmfd   sp!, {r4, lr}                   @ 2\n\
-        mov     r2, %0                          @ 1\n\
-1:      ldmia   r1!, {r3, r4, ip, lr}           @ 4\n\
-        stmia   r0, {r3, r4, ip, lr}            @ 4\n\
-        mcr     p15, 0, r0, c7, c14, 1          @ 1   clean and invalidate D line\n\
-        add     r0, r0, #16                     @ 1\n\
-        ldmia   r1!, {r3, r4, ip, lr}           @ 4\n\
-        stmia   r0, {r3, r4, ip, lr}            @ 4\n\
-        mcr     p15, 0, r0, c7, c14, 1          @ 1   clean and invalidate D line\n\
-        add     r0, r0, #16                     @ 1\n\
-        subs    r2, r2, #1                      @ 1\n\
+        int tmp;
+
+        asm volatile ("\
+1:      ldmia   %1!, {r3, r4, ip, lr}           @ 4\n\
+        stmia   %0, {r3, r4, ip, lr}            @ 4\n\
+        mcr     p15, 0, %0, c7, c14, 1          @ 1   clean and invalidate D line\n\
+        add     %0, %0, #16                     @ 1\n\
+        ldmia   %1!, {r3, r4, ip, lr}           @ 4\n\
+        stmia   %0, {r3, r4, ip, lr}            @ 4\n\
+        mcr     p15, 0, %0, c7, c14, 1          @ 1   clean and invalidate D line\n\
+        add     %0, %0, #16                     @ 1\n\
+        subs    %2, %2, #1                      @ 1\n\
         bne     1b                              @ 1\n\
-        mcr     p15, 0, r2, c7, c10, 4          @ 1   drain WB\n\
-        ldmfd   sp!, {r4, pc}                   @ 3"
-        :
-        : "I" (PAGE_SIZE / 32));
+        mcr     p15, 0, %2, c7, c10, 4          @ 1   drain WB"
+        : "+&r" (kto), "+&r" (kfrom), "=&r" (tmp)
+        : "2" (PAGE_SIZE / 32)
+        : "r3", "r4", "ip", "lr");
 }
 
 void fa_copy_user_highpage(struct page *to, struct page *from,
@@ -13,58 +13,56 @@
 #include <linux/init.h>
 #include <linux/highmem.h>
 
-static void __naked
-feroceon_copy_user_page(void *kto, const void *kfrom)
+static void feroceon_copy_user_page(void *kto, const void *kfrom)
 {
-        asm("\
-        stmfd   sp!, {r4-r9, lr}                \n\
-        mov     ip, %2                          \n\
-1:      mov     lr, r1                          \n\
-        ldmia   r1!, {r2 - r9}                  \n\
-        pld     [lr, #32]                       \n\
-        pld     [lr, #64]                       \n\
-        pld     [lr, #96]                       \n\
-        pld     [lr, #128]                      \n\
-        pld     [lr, #160]                      \n\
-        pld     [lr, #192]                      \n\
-        pld     [lr, #224]                      \n\
-        stmia   r0, {r2 - r9}                   \n\
-        ldmia   r1!, {r2 - r9}                  \n\
-        mcr     p15, 0, r0, c7, c14, 1          @ clean and invalidate D line\n\
-        add     r0, r0, #32                     \n\
-        stmia   r0, {r2 - r9}                   \n\
-        ldmia   r1!, {r2 - r9}                  \n\
-        mcr     p15, 0, r0, c7, c14, 1          @ clean and invalidate D line\n\
-        add     r0, r0, #32                     \n\
-        stmia   r0, {r2 - r9}                   \n\
-        ldmia   r1!, {r2 - r9}                  \n\
-        mcr     p15, 0, r0, c7, c14, 1          @ clean and invalidate D line\n\
-        add     r0, r0, #32                     \n\
-        stmia   r0, {r2 - r9}                   \n\
-        ldmia   r1!, {r2 - r9}                  \n\
-        mcr     p15, 0, r0, c7, c14, 1          @ clean and invalidate D line\n\
-        add     r0, r0, #32                     \n\
-        stmia   r0, {r2 - r9}                   \n\
-        ldmia   r1!, {r2 - r9}                  \n\
-        mcr     p15, 0, r0, c7, c14, 1          @ clean and invalidate D line\n\
-        add     r0, r0, #32                     \n\
-        stmia   r0, {r2 - r9}                   \n\
-        ldmia   r1!, {r2 - r9}                  \n\
-        mcr     p15, 0, r0, c7, c14, 1          @ clean and invalidate D line\n\
-        add     r0, r0, #32                     \n\
-        stmia   r0, {r2 - r9}                   \n\
-        ldmia   r1!, {r2 - r9}                  \n\
-        mcr     p15, 0, r0, c7, c14, 1          @ clean and invalidate D line\n\
-        add     r0, r0, #32                     \n\
-        stmia   r0, {r2 - r9}                   \n\
-        subs    ip, ip, #(32 * 8)               \n\
-        mcr     p15, 0, r0, c7, c14, 1          @ clean and invalidate D line\n\
-        add     r0, r0, #32                     \n\
+        int tmp;
+
+        asm volatile ("\
+1:      ldmia   %1!, {r2 - r7, ip, lr}          \n\
+        pld     [%1, #0]                        \n\
+        pld     [%1, #32]                       \n\
+        pld     [%1, #64]                       \n\
+        pld     [%1, #96]                       \n\
+        pld     [%1, #128]                      \n\
+        pld     [%1, #160]                      \n\
+        pld     [%1, #192]                      \n\
+        stmia   %0, {r2 - r7, ip, lr}           \n\
+        ldmia   %1!, {r2 - r7, ip, lr}          \n\
+        mcr     p15, 0, %0, c7, c14, 1          @ clean and invalidate D line\n\
+        add     %0, %0, #32                     \n\
+        stmia   %0, {r2 - r7, ip, lr}           \n\
+        ldmia   %1!, {r2 - r7, ip, lr}          \n\
+        mcr     p15, 0, %0, c7, c14, 1          @ clean and invalidate D line\n\
+        add     %0, %0, #32                     \n\
+        stmia   %0, {r2 - r7, ip, lr}           \n\
+        ldmia   %1!, {r2 - r7, ip, lr}          \n\
+        mcr     p15, 0, %0, c7, c14, 1          @ clean and invalidate D line\n\
+        add     %0, %0, #32                     \n\
+        stmia   %0, {r2 - r7, ip, lr}           \n\
+        ldmia   %1!, {r2 - r7, ip, lr}          \n\
+        mcr     p15, 0, %0, c7, c14, 1          @ clean and invalidate D line\n\
+        add     %0, %0, #32                     \n\
+        stmia   %0, {r2 - r7, ip, lr}           \n\
+        ldmia   %1!, {r2 - r7, ip, lr}          \n\
+        mcr     p15, 0, %0, c7, c14, 1          @ clean and invalidate D line\n\
+        add     %0, %0, #32                     \n\
+        stmia   %0, {r2 - r7, ip, lr}           \n\
+        ldmia   %1!, {r2 - r7, ip, lr}          \n\
+        mcr     p15, 0, %0, c7, c14, 1          @ clean and invalidate D line\n\
+        add     %0, %0, #32                     \n\
+        stmia   %0, {r2 - r7, ip, lr}           \n\
+        ldmia   %1!, {r2 - r7, ip, lr}          \n\
+        mcr     p15, 0, %0, c7, c14, 1          @ clean and invalidate D line\n\
+        add     %0, %0, #32                     \n\
+        stmia   %0, {r2 - r7, ip, lr}           \n\
+        subs    %2, %2, #(32 * 8)               \n\
+        mcr     p15, 0, %0, c7, c14, 1          @ clean and invalidate D line\n\
+        add     %0, %0, #32                     \n\
         bne     1b                              \n\
-        mcr     p15, 0, ip, c7, c10, 4          @ drain WB\n\
-        ldmfd   sp!, {r4-r9, pc}"
-        :
-        : "r" (kto), "r" (kfrom), "I" (PAGE_SIZE));
+        mcr     p15, 0, %2, c7, c10, 4          @ drain WB"
+        : "+&r" (kto), "+&r" (kfrom), "=&r" (tmp)
+        : "2" (PAGE_SIZE)
+        : "r2", "r3", "r4", "r5", "r6", "r7", "ip", "lr");
 }
 
 void feroceon_copy_user_highpage(struct page *to, struct page *from,
@@ -40,12 +40,11 @@ static DEFINE_RAW_SPINLOCK(minicache_lock);
  * instruction.  If your processor does not supply this, you have to write your
  * own copy_user_highpage that does the right thing.
  */
-static void __naked
-mc_copy_user_page(void *from, void *to)
+static void mc_copy_user_page(void *from, void *to)
 {
-        asm volatile(
-        "stmfd  sp!, {r4, lr}                   @ 2\n\
-        mov     r4, %2                          @ 1\n\
+        int tmp;
+
+        asm volatile ("\
         ldmia   %0!, {r2, r3, ip, lr}           @ 4\n\
 1:      mcr     p15, 0, %1, c7, c6, 1           @ 1   invalidate D line\n\
         stmia   %1!, {r2, r3, ip, lr}           @ 4\n\
@@ -55,13 +54,13 @@ mc_copy_user_page(void *from, void *to)
         mcr     p15, 0, %1, c7, c6, 1           @ 1   invalidate D line\n\
         stmia   %1!, {r2, r3, ip, lr}           @ 4\n\
         ldmia   %0!, {r2, r3, ip, lr}           @ 4\n\
-        subs    r4, r4, #1                      @ 1\n\
+        subs    %2, %2, #1                      @ 1\n\
         stmia   %1!, {r2, r3, ip, lr}           @ 4\n\
         ldmneia %0!, {r2, r3, ip, lr}           @ 4\n\
-        bne     1b                              @ 1\n\
-        ldmfd   sp!, {r4, pc}                   @ 3"
-        :
-        : "r" (from), "r" (to), "I" (PAGE_SIZE / 64));
+        bne     1b                              @ "
+        : "+&r" (from), "+&r" (to), "=&r" (tmp)
+        : "2" (PAGE_SIZE / 64)
+        : "r2", "r3", "ip", "lr");
 }
 
 void v4_mc_copy_user_highpage(struct page *to, struct page *from,
@@ -22,29 +22,28 @@
  * instruction.  If your processor does not supply this, you have to write your
  * own copy_user_highpage that does the right thing.
  */
-static void __naked
-v4wb_copy_user_page(void *kto, const void *kfrom)
+static void v4wb_copy_user_page(void *kto, const void *kfrom)
 {
-        asm("\
-        stmfd   sp!, {r4, lr}                   @ 2\n\
-        mov     r2, %2                          @ 1\n\
-        ldmia   r1!, {r3, r4, ip, lr}           @ 4\n\
-1:      mcr     p15, 0, r0, c7, c6, 1           @ 1   invalidate D line\n\
-        stmia   r0!, {r3, r4, ip, lr}           @ 4\n\
-        ldmia   r1!, {r3, r4, ip, lr}           @ 4+1\n\
-        stmia   r0!, {r3, r4, ip, lr}           @ 4\n\
-        ldmia   r1!, {r3, r4, ip, lr}           @ 4\n\
-        mcr     p15, 0, r0, c7, c6, 1           @ 1   invalidate D line\n\
-        stmia   r0!, {r3, r4, ip, lr}           @ 4\n\
-        ldmia   r1!, {r3, r4, ip, lr}           @ 4\n\
-        subs    r2, r2, #1                      @ 1\n\
-        stmia   r0!, {r3, r4, ip, lr}           @ 4\n\
-        ldmneia r1!, {r3, r4, ip, lr}           @ 4\n\
+        int tmp;
+
+        asm volatile ("\
+        ldmia   %1!, {r3, r4, ip, lr}           @ 4\n\
+1:      mcr     p15, 0, %0, c7, c6, 1           @ 1   invalidate D line\n\
+        stmia   %0!, {r3, r4, ip, lr}           @ 4\n\
+        ldmia   %1!, {r3, r4, ip, lr}           @ 4+1\n\
+        stmia   %0!, {r3, r4, ip, lr}           @ 4\n\
+        ldmia   %1!, {r3, r4, ip, lr}           @ 4\n\
+        mcr     p15, 0, %0, c7, c6, 1           @ 1   invalidate D line\n\
+        stmia   %0!, {r3, r4, ip, lr}           @ 4\n\
+        ldmia   %1!, {r3, r4, ip, lr}           @ 4\n\
+        subs    %2, %2, #1                      @ 1\n\
+        stmia   %0!, {r3, r4, ip, lr}           @ 4\n\
+        ldmneia %1!, {r3, r4, ip, lr}           @ 4\n\
         bne     1b                              @ 1\n\
-        mcr     p15, 0, r1, c7, c10, 4          @ 1   drain WB\n\
-        ldmfd   sp!, {r4, pc}                   @ 3"
-        :
-        : "r" (kto), "r" (kfrom), "I" (PAGE_SIZE / 64));
+        mcr     p15, 0, %1, c7, c10, 4          @ 1   drain WB"
+        : "+&r" (kto), "+&r" (kfrom), "=&r" (tmp)
+        : "2" (PAGE_SIZE / 64)
+        : "r3", "r4", "ip", "lr");
 }
 
 void v4wb_copy_user_highpage(struct page *to, struct page *from,
@@ -20,27 +20,26 @@
  * dirty data in the cache.  However, we do have to ensure that
  * subsequent reads are up to date.
  */
-static void __naked
-v4wt_copy_user_page(void *kto, const void *kfrom)
+static void v4wt_copy_user_page(void *kto, const void *kfrom)
 {
-        asm("\
-        stmfd   sp!, {r4, lr}                   @ 2\n\
-        mov     r2, %2                          @ 1\n\
-        ldmia   r1!, {r3, r4, ip, lr}           @ 4\n\
-1:      stmia   r0!, {r3, r4, ip, lr}           @ 4\n\
-        ldmia   r1!, {r3, r4, ip, lr}           @ 4+1\n\
-        stmia   r0!, {r3, r4, ip, lr}           @ 4\n\
-        ldmia   r1!, {r3, r4, ip, lr}           @ 4\n\
-        stmia   r0!, {r3, r4, ip, lr}           @ 4\n\
-        ldmia   r1!, {r3, r4, ip, lr}           @ 4\n\
-        subs    r2, r2, #1                      @ 1\n\
-        stmia   r0!, {r3, r4, ip, lr}           @ 4\n\
-        ldmneia r1!, {r3, r4, ip, lr}           @ 4\n\
+        int tmp;
+
+        asm volatile ("\
+        ldmia   %1!, {r3, r4, ip, lr}           @ 4\n\
+1:      stmia   %0!, {r3, r4, ip, lr}           @ 4\n\
+        ldmia   %1!, {r3, r4, ip, lr}           @ 4+1\n\
+        stmia   %0!, {r3, r4, ip, lr}           @ 4\n\
+        ldmia   %1!, {r3, r4, ip, lr}           @ 4\n\
+        stmia   %0!, {r3, r4, ip, lr}           @ 4\n\
+        ldmia   %1!, {r3, r4, ip, lr}           @ 4\n\
+        subs    %2, %2, #1                      @ 1\n\
+        stmia   %0!, {r3, r4, ip, lr}           @ 4\n\
+        ldmneia %1!, {r3, r4, ip, lr}           @ 4\n\
         bne     1b                              @ 1\n\
-        mcr     p15, 0, r2, c7, c7, 0           @ flush ID cache\n\
-        ldmfd   sp!, {r4, pc}                   @ 3"
-        :
-        : "r" (kto), "r" (kfrom), "I" (PAGE_SIZE / 64));
+        mcr     p15, 0, %2, c7, c7, 0           @ flush ID cache"
+        : "+&r" (kto), "+&r" (kfrom), "=&r" (tmp)
+        : "2" (PAGE_SIZE / 64)
+        : "r3", "r4", "ip", "lr");
 }
 
 void v4wt_copy_user_highpage(struct page *to, struct page *from,
@@ -21,53 +21,46 @@
 
 /*
  * XSC3 optimised copy_user_highpage
- *  r0 = destination
- *  r1 = source
  *
  * The source page may have some clean entries in the cache already, but we
  * can safely ignore them - break_cow() will flush them out of the cache
  * if we eventually end up using our copied page.
- *
  */
-static void __naked
-xsc3_mc_copy_user_page(void *kto, const void *kfrom)
+static void xsc3_mc_copy_user_page(void *kto, const void *kfrom)
 {
-        asm("\
-        stmfd   sp!, {r4, r5, lr}               \n\
-        mov     lr, %2                          \n\
-        \n\
-        pld     [r1, #0]                        \n\
-        pld     [r1, #32]                       \n\
-1:      pld     [r1, #64]                       \n\
-        pld     [r1, #96]                       \n\
-        \n\
-2:      ldrd    r2, [r1], #8                    \n\
-        mov     ip, r0                          \n\
-        ldrd    r4, [r1], #8                    \n\
-        mcr     p15, 0, ip, c7, c6, 1           @ invalidate\n\
-        strd    r2, [r0], #8                    \n\
-        ldrd    r2, [r1], #8                    \n\
-        strd    r4, [r0], #8                    \n\
-        ldrd    r4, [r1], #8                    \n\
-        strd    r2, [r0], #8                    \n\
-        strd    r4, [r0], #8                    \n\
-        ldrd    r2, [r1], #8                    \n\
-        mov     ip, r0                          \n\
-        ldrd    r4, [r1], #8                    \n\
-        mcr     p15, 0, ip, c7, c6, 1           @ invalidate\n\
-        strd    r2, [r0], #8                    \n\
-        ldrd    r2, [r1], #8                    \n\
-        subs    lr, lr, #1                      \n\
-        strd    r4, [r0], #8                    \n\
-        ldrd    r4, [r1], #8                    \n\
-        strd    r2, [r0], #8                    \n\
-        strd    r4, [r0], #8                    \n\
+        int tmp;
+
+        asm volatile ("\
+        pld     [%1, #0]                        \n\
+        pld     [%1, #32]                       \n\
+1:      pld     [%1, #64]                       \n\
+        pld     [%1, #96]                       \n\
+        \n\
+2:      ldrd    r2, [%1], #8                    \n\
+        ldrd    r4, [%1], #8                    \n\
+        mcr     p15, 0, %0, c7, c6, 1           @ invalidate\n\
+        strd    r2, [%0], #8                    \n\
+        ldrd    r2, [%1], #8                    \n\
+        strd    r4, [%0], #8                    \n\
+        ldrd    r4, [%1], #8                    \n\
+        strd    r2, [%0], #8                    \n\
+        strd    r4, [%0], #8                    \n\
+        ldrd    r2, [%1], #8                    \n\
+        ldrd    r4, [%1], #8                    \n\
+        mcr     p15, 0, %0, c7, c6, 1           @ invalidate\n\
+        strd    r2, [%0], #8                    \n\
+        ldrd    r2, [%1], #8                    \n\
+        subs    %2, %2, #1                      \n\
+        strd    r4, [%0], #8                    \n\
+        ldrd    r4, [%1], #8                    \n\
+        strd    r2, [%0], #8                    \n\
+        strd    r4, [%0], #8                    \n\
         bgt     1b                              \n\
-        beq     2b                              \n\
-        \n\
-        ldmfd   sp!, {r4, r5, pc}"
-        :
-        : "r" (kto), "r" (kfrom), "I" (PAGE_SIZE / 64 - 1));
+        beq     2b                              "
+        : "+&r" (kto), "+&r" (kfrom), "=&r" (tmp)
+        : "2" (PAGE_SIZE / 64 - 1)
+        : "r2", "r3", "r4", "r5");
 }
 
 void xsc3_mc_copy_user_highpage(struct page *to, struct page *from,
@@ -85,8 +78,6 @@ void xsc3_mc_copy_user_highpage(struct page *to, struct page *from,
 
 /*
  * XScale optimised clear_user_page
- *  r0 = destination
- *  r1 = virtual user address of ultimate destination page
  */
 void xsc3_mc_clear_user_highpage(struct page *page, unsigned long vaddr)
 {
@@ -36,52 +36,51 @@ static DEFINE_RAW_SPINLOCK(minicache_lock);
  * Dcache aliasing issue.  The writes will be forwarded to the write buffer,
  * and merged as appropriate.
  */
-static void __naked
-mc_copy_user_page(void *from, void *to)
+static void mc_copy_user_page(void *from, void *to)
 {
+        int tmp;
+
         /*
          * Strangely enough, best performance is achieved
          * when prefetching destination as well.  (NP)
          */
-        asm volatile(
-        "stmfd  sp!, {r4, r5, lr}               \n\
-        mov     lr, %2                          \n\
-        pld     [r0, #0]                        \n\
-        pld     [r0, #32]                       \n\
-        pld     [r1, #0]                        \n\
-        pld     [r1, #32]                       \n\
-1:      pld     [r0, #64]                       \n\
-        pld     [r0, #96]                       \n\
-        pld     [r1, #64]                       \n\
-        pld     [r1, #96]                       \n\
-2:      ldrd    r2, [r0], #8                    \n\
-        ldrd    r4, [r0], #8                    \n\
-        mov     ip, r1                          \n\
-        strd    r2, [r1], #8                    \n\
-        ldrd    r2, [r0], #8                    \n\
-        strd    r4, [r1], #8                    \n\
-        ldrd    r4, [r0], #8                    \n\
-        strd    r2, [r1], #8                    \n\
-        strd    r4, [r1], #8                    \n\
+        asm volatile ("\
+        pld     [%0, #0]                        \n\
+        pld     [%0, #32]                       \n\
+        pld     [%1, #0]                        \n\
+        pld     [%1, #32]                       \n\
+1:      pld     [%0, #64]                       \n\
+        pld     [%0, #96]                       \n\
+        pld     [%1, #64]                       \n\
+        pld     [%1, #96]                       \n\
+2:      ldrd    r2, [%0], #8                    \n\
+        ldrd    r4, [%0], #8                    \n\
+        mov     ip, %1                          \n\
+        strd    r2, [%1], #8                    \n\
+        ldrd    r2, [%0], #8                    \n\
+        strd    r4, [%1], #8                    \n\
+        ldrd    r4, [%0], #8                    \n\
+        strd    r2, [%1], #8                    \n\
+        strd    r4, [%1], #8                    \n\
         mcr     p15, 0, ip, c7, c10, 1          @ clean D line\n\
-        ldrd    r2, [r0], #8                    \n\
+        ldrd    r2, [%0], #8                    \n\
         mcr     p15, 0, ip, c7, c6, 1           @ invalidate D line\n\
-        ldrd    r4, [r0], #8                    \n\
-        mov     ip, r1                          \n\
-        strd    r2, [r1], #8                    \n\
-        ldrd    r2, [r0], #8                    \n\
-        strd    r4, [r1], #8                    \n\
-        ldrd    r4, [r0], #8                    \n\
-        strd    r2, [r1], #8                    \n\
-        strd    r4, [r1], #8                    \n\
+        ldrd    r4, [%0], #8                    \n\
+        mov     ip, %1                          \n\
+        strd    r2, [%1], #8                    \n\
+        ldrd    r2, [%0], #8                    \n\
+        strd    r4, [%1], #8                    \n\
+        ldrd    r4, [%0], #8                    \n\
+        strd    r2, [%1], #8                    \n\
+        strd    r4, [%1], #8                    \n\
         mcr     p15, 0, ip, c7, c10, 1          @ clean D line\n\
-        subs    lr, lr, #1                      \n\
+        subs    %2, %2, #1                      \n\
         mcr     p15, 0, ip, c7, c6, 1           @ invalidate D line\n\
         bgt     1b                              \n\
-        beq     2b                              \n\
-        ldmfd   sp!, {r4, r5, pc}               "
-        :
-        : "r" (from), "r" (to), "I" (PAGE_SIZE / 64 - 1));
+        beq     2b                              "
+        : "+&r" (from), "+&r" (to), "=&r" (tmp)
+        : "2" (PAGE_SIZE / 64 - 1)
+        : "r2", "r3", "r4", "r5", "ip");
 }
 
 void xscale_mc_copy_user_highpage(struct page *to, struct page *from,
@@ -1449,6 +1449,7 @@ config ARCH_HAS_MEM_ENCRYPT
 config AMD_MEM_ENCRYPT
         bool "AMD Secure Memory Encryption (SME) support"
         depends on X86_64 && CPU_SUP_AMD
+        select ARCH_USE_MEMREMAP_PROT
         ---help---
           Say yes to enable support for the encryption of system memory.
           This requires an AMD processor that supports Secure Memory
@@ -1467,10 +1468,6 @@ config AMD_MEM_ENCRYPT_ACTIVE_BY_DEFAULT
           If set to N, then the encryption of system memory can be
           activated with the mem_encrypt=on command line option.
 
-config ARCH_USE_MEMREMAP_PROT
-        def_bool y
-        depends on AMD_MEM_ENCRYPT
-
 # Common NUMA Features
 config NUMA
         bool "Numa Memory Allocation and Scheduler Support"
@@ -1903,6 +1900,7 @@ config EFI
         depends on ACPI
         select UCS2_STRING
         select EFI_RUNTIME_WRAPPERS
+        select ARCH_USE_MEMREMAP_PROT
         ---help---
           This enables the kernel to use EFI runtime services that are
           available (such as the EFI variable services).
@@ -626,7 +626,7 @@ bool phys_mem_access_encrypted(unsigned long phys_addr, unsigned long size)
         return arch_memremap_can_ram_remap(phys_addr, size, 0);
 }
 
-#ifdef CONFIG_ARCH_USE_MEMREMAP_PROT
+#ifdef CONFIG_AMD_MEM_ENCRYPT
 /* Remap memory with encryption */
 void __init *early_memremap_encrypted(resource_size_t phys_addr,
                                       unsigned long size)
@@ -668,7 +668,7 @@ void __init *early_memremap_decrypted_wp(resource_size_t phys_addr,
 
         return early_memremap_prot(phys_addr, size, __PAGE_KERNEL_NOENC_WP);
 }
-#endif  /* CONFIG_ARCH_USE_MEMREMAP_PROT */
+#endif  /* CONFIG_AMD_MEM_ENCRYPT */
 
 static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
@@ -276,7 +276,8 @@ void __init efi_arch_mem_reserve(phys_addr_t addr, u64 size)
                 return;
         }
 
-        new = early_memremap(new_phys, new_size);
+        new = early_memremap_prot(new_phys, new_size,
+                                  pgprot_val(pgprot_encrypted(FIXMAP_PAGE_NORMAL)));
         if (!new) {
                 pr_err("Failed to map new boot services memmap\n");
                 return;
@@ -3182,8 +3182,19 @@ static unsigned int ata_scsi_pass_thru(struct ata_queued_cmd *qc)
                 goto invalid_fld;
         }
 
-        if (ata_is_ncq(tf->protocol) && (cdb[2 + cdb_offset] & 0x3) == 0)
-                tf->protocol = ATA_PROT_NCQ_NODATA;
+        if ((cdb[2 + cdb_offset] & 0x3) == 0) {
+                /*
+                 * When T_LENGTH is zero (No data is transferred), dir should
+                 * be DMA_NONE.
+                 */
+                if (scmd->sc_data_direction != DMA_NONE) {
+                        fp = 2 + cdb_offset;
+                        goto invalid_fld;
+                }
+
+                if (ata_is_ncq(tf->protocol))
+                        tf->protocol = ATA_PROT_NCQ_NODATA;
+        }
 
         /* enable LBA */
         tf->flags |= ATA_TFLAG_LBA;
@@ -1566,9 +1566,12 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
         unsigned long flags;
         struct blkfront_ring_info *rinfo = (struct blkfront_ring_info *)dev_id;
         struct blkfront_info *info = rinfo->dev_info;
+        unsigned int eoiflag = XEN_EOI_FLAG_SPURIOUS;
 
-        if (unlikely(info->connected != BLKIF_STATE_CONNECTED))
+        if (unlikely(info->connected != BLKIF_STATE_CONNECTED)) {
+                xen_irq_lateeoi(irq, XEN_EOI_FLAG_SPURIOUS);
                 return IRQ_HANDLED;
+        }
 
         spin_lock_irqsave(&rinfo->ring_lock, flags);
  again:
@@ -1584,6 +1587,8 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
                 unsigned long id;
                 unsigned int op;
 
+                eoiflag = 0;
+
                 RING_COPY_RESPONSE(&rinfo->ring, i, &bret);
                 id = bret.id;
 
@@ -1699,6 +1704,8 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
 
         spin_unlock_irqrestore(&rinfo->ring_lock, flags);
 
+        xen_irq_lateeoi(irq, eoiflag);
+
         return IRQ_HANDLED;
 
  err:
@@ -1706,6 +1713,8 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
 
         spin_unlock_irqrestore(&rinfo->ring_lock, flags);
 
+        /* No EOI in order to avoid further interrupts. */
+
         pr_alert("%s disabled for further use\n", info->gd->disk_name);
         return IRQ_HANDLED;
 }
@@ -1745,8 +1754,8 @@ static int setup_blkring(struct xenbus_device *dev,
         if (err)
                 goto fail;
 
-        err = bind_evtchn_to_irqhandler(rinfo->evtchn, blkif_interrupt, 0,
-                                        "blkif", rinfo);
+        err = bind_evtchn_to_irqhandler_lateeoi(rinfo->evtchn, blkif_interrupt,
+                                                0, "blkif", rinfo);
         if (err <= 0) {
                 xenbus_dev_fatal(dev, err,
                                  "bind_evtchn_to_irqhandler failed");
@@ -285,7 +285,7 @@ agp_ioc_init(void __iomem *ioc_regs)
         return 0;
 }
 
-static int
+static int __init
 lba_find_capability(int cap)
 {
         struct _parisc_agp_info *info = &parisc_agp_info;
@@ -370,7 +370,7 @@ fail:
         return error;
 }
 
-static int
+static int __init
 find_quicksilver(struct device *dev, void *data)
 {
         struct parisc_device **lba = data;
@@ -382,7 +382,7 @@ find_quicksilver(struct device *dev, void *data)
         return 0;
 }
 
-static int
+static int __init
 parisc_agp_init(void)
 {
         extern struct sba_device *sba_list;
@@ -886,4 +886,4 @@ MODULE_LICENSE("GPL v2");
 MODULE_DESCRIPTION("STMicroelectronics FDMA engine driver");
 MODULE_AUTHOR("Ludovic.barre <Ludovic.barre@st.com>");
 MODULE_AUTHOR("Peter Griffin <peter.griffin@linaro.org>");
-MODULE_ALIAS("platform: " DRIVER_NAME);
+MODULE_ALIAS("platform:" DRIVER_NAME);
@@ -27,7 +27,6 @@ struct scpi_pm_domain {
         struct generic_pm_domain genpd;
         struct scpi_ops *ops;
         u32 domain;
-        char name[30];
 };
 
 /*
@@ -121,8 +120,13 @@ static int scpi_pm_domain_probe(struct platform_device *pdev)
 
                 scpi_pd->domain = i;
                 scpi_pd->ops = scpi_ops;
-                sprintf(scpi_pd->name, "%s.%d", np->name, i);
-                scpi_pd->genpd.name = scpi_pd->name;
+                scpi_pd->genpd.name = devm_kasprintf(dev, GFP_KERNEL,
+                                                     "%s.%d", np->name, i);
+                if (!scpi_pd->genpd.name) {
+                        dev_err(dev, "Failed to allocate genpd name:%s.%d\n",
+                                np->name, i);
+                        continue;
+                }
                 scpi_pd->genpd.power_off = scpi_pd_power_off;
                 scpi_pd->genpd.power_on = scpi_pd_power_on;
@@ -1563,6 +1563,8 @@ static int dsi_host_parse_lane_data(struct msm_dsi_host *msm_host,
         if (!prop) {
                 dev_dbg(dev,
                         "failed to find data lane mapping, using default\n");
+                /* Set the number of date lanes to 4 by default. */
+                msm_host->num_data_lanes = 4;
                 return 0;
         }
 
@@ -578,15 +578,18 @@ static const struct file_operations i8k_fops = {
         .unlocked_ioctl = i8k_ioctl,
 };
 
+static struct proc_dir_entry *entry;
+
 static void __init i8k_init_procfs(void)
 {
         /* Register the proc entry */
-        proc_create("i8k", 0, NULL, &i8k_fops);
+        entry = proc_create("i8k", 0, NULL, &i8k_fops);
 }
 
 static void __exit i8k_exit_procfs(void)
 {
-        remove_proc_entry("i8k", NULL);
+        if (entry)
+                remove_proc_entry("i8k", NULL);
 }
 
 #else
@@ -424,8 +424,8 @@ static void rk3x_i2c_handle_read(struct rk3x_i2c *i2c, unsigned int ipd)
         if (!(ipd & REG_INT_MBRF))
                 return;
 
-        /* ack interrupt */
-        i2c_writel(i2c, REG_INT_MBRF, REG_IPD);
+        /* ack interrupt (read also produces a spurious START flag, clear it too) */
+        i2c_writel(i2c, REG_INT_MBRF | REG_INT_START, REG_IPD);
 
         /* Can only handle a maximum of 32 bytes at a time */
         if (len > 32)
@@ -79,8 +79,8 @@ void touchscreen_parse_properties(struct input_dev *input, bool multitouch,
         data_present = touchscreen_get_prop_u32(dev, "touchscreen-size-x",
                                                 input_abs_get_max(input,
                                                                   axis) + 1,
-                                                &maximum) |
-                       touchscreen_get_prop_u32(dev, "touchscreen-fuzz-x",
-                                                input_abs_get_fuzz(input, axis),
-                                                &fuzz);
+                                                &maximum);
+        data_present |= touchscreen_get_prop_u32(dev, "touchscreen-fuzz-x",
+                                                 input_abs_get_fuzz(input, axis),
+                                                 &fuzz);
         if (data_present)
@@ -90,8 +90,8 @@ void touchscreen_parse_properties(struct input_dev *input, bool multitouch,
         data_present = touchscreen_get_prop_u32(dev, "touchscreen-size-y",
                                                 input_abs_get_max(input,
                                                                   axis) + 1,
-                                                &maximum) |
-                       touchscreen_get_prop_u32(dev, "touchscreen-fuzz-y",
-                                                input_abs_get_fuzz(input, axis),
-                                                &fuzz);
+                                                &maximum);
+        data_present |= touchscreen_get_prop_u32(dev, "touchscreen-fuzz-y",
+                                                 input_abs_get_fuzz(input, axis),
+                                                 &fuzz);
         if (data_present)
@@ -101,11 +101,11 @@ void touchscreen_parse_properties(struct input_dev *input, bool multitouch,
         data_present = touchscreen_get_prop_u32(dev,
                                                 "touchscreen-max-pressure",
                                                 input_abs_get_max(input, axis),
-                                                &maximum) |
-                       touchscreen_get_prop_u32(dev,
-                                                "touchscreen-fuzz-pressure",
-                                                input_abs_get_fuzz(input, axis),
-                                                &fuzz);
+                                                &maximum);
+        data_present |= touchscreen_get_prop_u32(dev,
+                                                 "touchscreen-fuzz-pressure",
+                                                 input_abs_get_fuzz(input, axis),
+                                                 &fuzz);
         if (data_present)
                 touchscreen_set_params(input, axis, maximum, fuzz);
 
@@ -423,9 +423,9 @@ static int rebalance_children(struct shadow_spine *s,
 
                 memcpy(n, dm_block_data(child),
                        dm_bm_block_size(dm_tm_get_bm(info->tm)));
-                dm_tm_unlock(info->tm, child);
 
                 dm_tm_dec(info->tm, dm_block_location(child));
+                dm_tm_unlock(info->tm, child);
                 return 0;
         }
 
@@ -120,9 +120,13 @@ static inline void tdma_port_write_desc_addr(struct bcm_sysport_priv *priv,
                                              struct dma_desc *desc,
                                              unsigned int port)
 {
+        unsigned long desc_flags;
+
         /* Ports are latched, so write upper address first */
+        spin_lock_irqsave(&priv->desc_lock, desc_flags);
         tdma_writel(priv, desc->addr_status_len, TDMA_WRITE_PORT_HI(port));
         tdma_writel(priv, desc->addr_lo, TDMA_WRITE_PORT_LO(port));
+        spin_unlock_irqrestore(&priv->desc_lock, desc_flags);
 }
 
 /* Ethtool operations */
@@ -1880,6 +1884,7 @@ static int bcm_sysport_open(struct net_device *dev)
         }
 
         /* Initialize both hardware and software ring */
+        spin_lock_init(&priv->desc_lock);
         for (i = 0; i < dev->num_tx_queues; i++) {
                 ret = bcm_sysport_init_tx_ring(priv, i);
                 if (ret) {
@@ -733,6 +733,7 @@ struct bcm_sysport_priv {
         int                     wol_irq;
 
         /* Transmit rings */
+        spinlock_t              desc_lock;
         struct bcm_sysport_tx_ring *tx_rings;
 
         /* Receive queue */
@@ -2911,6 +2911,7 @@ static int igbvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
         return 0;
 
 err_hw_init:
+        netif_napi_del(&adapter->rx_ring->napi);
         kfree(adapter->tx_ring);
         kfree(adapter->rx_ring);
 err_sw_init:
@@ -3397,6 +3397,9 @@ static s32 ixgbe_reset_hw_X550em(struct ixgbe_hw *hw)
         /* flush pending Tx transactions */
         ixgbe_clear_tx_pending(hw);
 
+        /* set MDIO speed before talking to the PHY in case it's the 1st time */
+        ixgbe_set_mdio_speed(hw);
+
         /* PHY ops must be identified and initialized prior to reset */
 
         /* Identify PHY and related function pointers */
@@ -649,7 +649,7 @@ void __init mlx4_en_init_ptys2ethtool_map(void)
         MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_1000BASE_T, SPEED_1000,
                                        ETHTOOL_LINK_MODE_1000baseT_Full_BIT);
         MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_1000BASE_CX_SGMII, SPEED_1000,
-                                       ETHTOOL_LINK_MODE_1000baseKX_Full_BIT);
+                                       ETHTOOL_LINK_MODE_1000baseX_Full_BIT);
         MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_1000BASE_KX, SPEED_1000,
                                        ETHTOOL_LINK_MODE_1000baseKX_Full_BIT);
         MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_10GBASE_T, SPEED_10000,
@@ -661,9 +661,9 @@ void __init mlx4_en_init_ptys2ethtool_map(void)
         MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_10GBASE_KR, SPEED_10000,
                                        ETHTOOL_LINK_MODE_10000baseKR_Full_BIT);
         MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_10GBASE_CR, SPEED_10000,
-                                       ETHTOOL_LINK_MODE_10000baseKR_Full_BIT);
+                                       ETHTOOL_LINK_MODE_10000baseCR_Full_BIT);
         MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_10GBASE_SR, SPEED_10000,
-                                       ETHTOOL_LINK_MODE_10000baseKR_Full_BIT);
+                                       ETHTOOL_LINK_MODE_10000baseSR_Full_BIT);
         MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_20GBASE_KR2, SPEED_20000,
                                        ETHTOOL_LINK_MODE_20000baseMLD2_Full_BIT,
                                        ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT);
@@ -920,11 +920,9 @@ static int lan78xx_read_otp(struct lan78xx_net *dev, u32 offset,
         ret = lan78xx_read_raw_otp(dev, 0, 1, &sig);
 
         if (ret == 0) {
-                if (sig == OTP_INDICATOR_1)
-                        offset = offset;
-                else if (sig == OTP_INDICATOR_2)
+                if (sig == OTP_INDICATOR_2)
                         offset += 0x100;
-                else
+                else if (sig != OTP_INDICATOR_1)
                         ret = -EINVAL;
                 if (!ret)
                         ret = lan78xx_read_raw_otp(dev, offset, length, data);
@@ -323,9 +323,9 @@ static int mwifiex_dnld_sleep_confirm_cmd(struct mwifiex_adapter *adapter)
 
         adapter->seq_num++;
         sleep_cfm_buf->seq_num =
-                cpu_to_le16((HostCmd_SET_SEQ_NO_BSS_INFO
+                cpu_to_le16(HostCmd_SET_SEQ_NO_BSS_INFO
                                         (adapter->seq_num, priv->bss_num,
-                                         priv->bss_type)));
+                                         priv->bss_type));
 
         mwifiex_dbg(adapter, CMD,
                     "cmd: DNLD_CMD: %#x, act %#x, len %d, seqno %#x\n",
@@ -498,10 +498,10 @@ enum mwifiex_channel_flags {
 
 #define RF_ANTENNA_AUTO 0xFFFF
 
-#define HostCmd_SET_SEQ_NO_BSS_INFO(seq, num, type) {   \
-        (((seq) & 0x00ff) |                             \
-         (((num) & 0x000f) << 8)) |                     \
-        (((type) & 0x000f) << 12); }
+#define HostCmd_SET_SEQ_NO_BSS_INFO(seq, num, type)     \
+        ((((seq) & 0x00ff) |                            \
+         (((num) & 0x000f) << 8)) |                     \
+        (((type) & 0x000f) << 12))
 
 #define HostCmd_GET_SEQ_NO(seq) \
         ((seq) & HostCmd_SEQ_NUM_MASK)
@@ -203,6 +203,7 @@ struct xenvif_queue { /* Per-queue data for xenvif */
         unsigned int rx_queue_max;
         unsigned int rx_queue_len;
         unsigned long last_rx_time;
+        unsigned int rx_slots_needed;
         bool stalled;
 
         struct xenvif_copy_state rx_copy;
@@ -33,28 +33,36 @@
 #include <xen/xen.h>
 #include <xen/events.h>
 
+/*
+ * Update the needed ring page slots for the first SKB queued.
+ * Note that any call sequence outside the RX thread calling this function
+ * needs to wake up the RX thread via a call of xenvif_kick_thread()
+ * afterwards in order to avoid a race with putting the thread to sleep.
+ */
+static void xenvif_update_needed_slots(struct xenvif_queue *queue,
+                                       const struct sk_buff *skb)
+{
+        unsigned int needed = 0;
+
+        if (skb) {
+                needed = DIV_ROUND_UP(skb->len, XEN_PAGE_SIZE);
+                if (skb_is_gso(skb))
+                        needed++;
+                if (skb->sw_hash)
+                        needed++;
+        }
+
+        WRITE_ONCE(queue->rx_slots_needed, needed);
+}
+
 static bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue)
 {
         RING_IDX prod, cons;
-        struct sk_buff *skb;
-        int needed;
-        unsigned long flags;
-
-        spin_lock_irqsave(&queue->rx_queue.lock, flags);
-
-        skb = skb_peek(&queue->rx_queue);
-        if (!skb) {
-                spin_unlock_irqrestore(&queue->rx_queue.lock, flags);
+        unsigned int needed;
+
+        needed = READ_ONCE(queue->rx_slots_needed);
+        if (!needed)
                 return false;
-        }
-
-        needed = DIV_ROUND_UP(skb->len, XEN_PAGE_SIZE);
-        if (skb_is_gso(skb))
-                needed++;
-        if (skb->sw_hash)
-                needed++;
-
-        spin_unlock_irqrestore(&queue->rx_queue.lock, flags);
 
         do {
                 prod = queue->rx.sring->req_prod;
@@ -80,13 +88,19 @@ void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb)
 
         spin_lock_irqsave(&queue->rx_queue.lock, flags);
 
-        __skb_queue_tail(&queue->rx_queue, skb);
-
-        queue->rx_queue_len += skb->len;
-        if (queue->rx_queue_len > queue->rx_queue_max) {
+        if (queue->rx_queue_len >= queue->rx_queue_max) {
                 struct net_device *dev = queue->vif->dev;
 
                 netif_tx_stop_queue(netdev_get_tx_queue(dev, queue->id));
+                kfree_skb(skb);
+                queue->vif->dev->stats.rx_dropped++;
+        } else {
+                if (skb_queue_empty(&queue->rx_queue))
+                        xenvif_update_needed_slots(queue, skb);
+
+                __skb_queue_tail(&queue->rx_queue, skb);
+
+                queue->rx_queue_len += skb->len;
         }
 
         spin_unlock_irqrestore(&queue->rx_queue.lock, flags);
@@ -100,6 +114,8 @@ static struct sk_buff *xenvif_rx_dequeue(struct xenvif_queue *queue)
 
         skb = __skb_dequeue(&queue->rx_queue);
         if (skb) {
+                xenvif_update_needed_slots(queue, skb_peek(&queue->rx_queue));
+
                 queue->rx_queue_len -= skb->len;
                 if (queue->rx_queue_len < queue->rx_queue_max) {
                         struct netdev_queue *txq;
@@ -134,6 +150,7 @@ static void xenvif_rx_queue_drop_expired(struct xenvif_queue *queue)
                         break;
                 xenvif_rx_dequeue(queue);
                 kfree_skb(skb);
+                queue->vif->dev->stats.rx_dropped++;
         }
 }
 
@@ -474,27 +491,31 @@ void xenvif_rx_action(struct xenvif_queue *queue)
         xenvif_rx_copy_flush(queue);
 }
 
-static bool xenvif_rx_queue_stalled(struct xenvif_queue *queue)
+static RING_IDX xenvif_rx_queue_slots(const struct xenvif_queue *queue)
 {
         RING_IDX prod, cons;
 
         prod = queue->rx.sring->req_prod;
         cons = queue->rx.req_cons;
 
+        return prod - cons;
+}
+
+static bool xenvif_rx_queue_stalled(const struct xenvif_queue *queue)
+{
+        unsigned int needed = READ_ONCE(queue->rx_slots_needed);
+
         return !queue->stalled &&
-               prod - cons < 1 &&
+               xenvif_rx_queue_slots(queue) < needed &&
                time_after(jiffies,
                           queue->last_rx_time + queue->vif->stall_timeout);
 }
 
 static bool xenvif_rx_queue_ready(struct xenvif_queue *queue)
 {
-        RING_IDX prod, cons;
+        unsigned int needed = READ_ONCE(queue->rx_slots_needed);
 
-        prod = queue->rx.sring->req_prod;
-        cons = queue->rx.req_cons;
-
-        return queue->stalled && prod - cons >= 1;
+        return queue->stalled && xenvif_rx_queue_slots(queue) >= needed;
 }
 
 bool xenvif_have_rx_work(struct xenvif_queue *queue, bool test_kthread)
@@ -142,6 +142,9 @@ struct netfront_queue {
         struct sk_buff *rx_skbs[NET_RX_RING_SIZE];
         grant_ref_t gref_rx_head;
         grant_ref_t grant_rx_ref[NET_RX_RING_SIZE];
+
+        unsigned int rx_rsp_unconsumed;
+        spinlock_t rx_cons_lock;
 };
 
 struct netfront_info {
@@ -366,12 +369,13 @@ static int xennet_open(struct net_device *dev)
         return 0;
 }
 
-static void xennet_tx_buf_gc(struct netfront_queue *queue)
+static bool xennet_tx_buf_gc(struct netfront_queue *queue)
 {
         RING_IDX cons, prod;
         unsigned short id;
         struct sk_buff *skb;
         bool more_to_do;
+        bool work_done = false;
         const struct device *dev = &queue->info->netdev->dev;
 
         BUG_ON(!netif_carrier_ok(queue->info->netdev));
@@ -388,6 +392,8 @@ static bool xennet_tx_buf_gc(struct netfront_queue *queue)
                 for (cons = queue->tx.rsp_cons; cons != prod; cons++) {
                         struct xen_netif_tx_response txrsp;
 
+                        work_done = true;
+
                         RING_COPY_RESPONSE(&queue->tx, cons, &txrsp);
                         if (txrsp.status == XEN_NETIF_RSP_NULL)
                                 continue;
@@ -431,11 +437,13 @@ static bool xennet_tx_buf_gc(struct netfront_queue *queue)
 
         xennet_maybe_wake_tx(queue);
 
-        return;
+        return work_done;
 
  err:
         queue->info->broken = true;
         dev_alert(dev, "Disabled for further use\n");
+
+        return work_done;
 }
 
 struct xennet_gnttab_make_txreq {
@@ -755,6 +763,16 @@ static int xennet_close(struct net_device *dev)
         return 0;
 }
 
+static void xennet_set_rx_rsp_cons(struct netfront_queue *queue, RING_IDX val)
+{
+        unsigned long flags;
+
+        spin_lock_irqsave(&queue->rx_cons_lock, flags);
+        queue->rx.rsp_cons = val;
+        queue->rx_rsp_unconsumed = RING_HAS_UNCONSUMED_RESPONSES(&queue->rx);
+        spin_unlock_irqrestore(&queue->rx_cons_lock, flags);
+}
+
 static void xennet_move_rx_slot(struct netfront_queue *queue, struct sk_buff *skb,
                                 grant_ref_t ref)
 {
@@ -806,7 +824,7 @@ static int xennet_get_extras(struct netfront_queue *queue,
                 xennet_move_rx_slot(queue, skb, ref);
         } while (extra.flags & XEN_NETIF_EXTRA_FLAG_MORE);
 
-        queue->rx.rsp_cons = cons;
+        xennet_set_rx_rsp_cons(queue, cons);
         return err;
 }
 
@@ -886,7 +904,7 @@ next:
         }
 
         if (unlikely(err))
-                queue->rx.rsp_cons = cons + slots;
+                xennet_set_rx_rsp_cons(queue, cons + slots);
 
         return err;
 }
@@ -940,7 +958,8 @@ static int xennet_fill_frags(struct netfront_queue *queue,
                         __pskb_pull_tail(skb, pull_to - skb_headlen(skb));
                 }
                 if (unlikely(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS)) {
-                        queue->rx.rsp_cons = ++cons + skb_queue_len(list);
+                        xennet_set_rx_rsp_cons(queue,
+                                               ++cons + skb_queue_len(list));
                         kfree_skb(nskb);
                         return -ENOENT;
                 }
@@ -953,7 +972,7 @@ static int xennet_fill_frags(struct netfront_queue *queue,
                 kfree_skb(nskb);
         }
 
-        queue->rx.rsp_cons = cons;
+        xennet_set_rx_rsp_cons(queue, cons);
 
         return 0;
 }
@@ -1074,7 +1093,9 @@ err:
 
                 if (unlikely(xennet_set_skb_gso(skb, gso))) {
                         __skb_queue_head(&tmpq, skb);
-                        queue->rx.rsp_cons += skb_queue_len(&tmpq);
+                        xennet_set_rx_rsp_cons(queue,
+                                               queue->rx.rsp_cons +
+                                               skb_queue_len(&tmpq));
                         goto err;
                 }
         }
@@ -1098,7 +1119,8 @@ err:
 
                 __skb_queue_tail(&rxq, skb);
 
-                i = ++queue->rx.rsp_cons;
+                i = queue->rx.rsp_cons + 1;
+                xennet_set_rx_rsp_cons(queue, i);
                 work_done++;
         }
 
@@ -1260,40 +1282,79 @@ static int xennet_set_features(struct net_device *dev,
         return 0;
 }
 
-static irqreturn_t xennet_tx_interrupt(int irq, void *dev_id)
+static bool xennet_handle_tx(struct netfront_queue *queue, unsigned int *eoi)
 {
-        struct netfront_queue *queue = dev_id;
         unsigned long flags;
 
-        if (queue->info->broken)
-                return IRQ_HANDLED;
+        if (unlikely(queue->info->broken))
+                return false;
 
         spin_lock_irqsave(&queue->tx_lock, flags);
-        xennet_tx_buf_gc(queue);
+        if (xennet_tx_buf_gc(queue))
+                *eoi = 0;
         spin_unlock_irqrestore(&queue->tx_lock, flags);
 
+        return true;
+}
+
+static irqreturn_t xennet_tx_interrupt(int irq, void *dev_id)
+{
+        unsigned int eoiflag = XEN_EOI_FLAG_SPURIOUS;
+
+        if (likely(xennet_handle_tx(dev_id, &eoiflag)))
+                xen_irq_lateeoi(irq, eoiflag);
+
         return IRQ_HANDLED;
 }
 
+static bool xennet_handle_rx(struct netfront_queue *queue, unsigned int *eoi)
+{
+        unsigned int work_queued;
+        unsigned long flags;
+
+        if (unlikely(queue->info->broken))
+                return false;
+
+        spin_lock_irqsave(&queue->rx_cons_lock, flags);
+        work_queued = RING_HAS_UNCONSUMED_RESPONSES(&queue->rx);
+        if (work_queued > queue->rx_rsp_unconsumed) {
+                queue->rx_rsp_unconsumed = work_queued;
+                *eoi = 0;
+        } else if (unlikely(work_queued < queue->rx_rsp_unconsumed)) {
+                const struct device *dev = &queue->info->netdev->dev;
+
+                spin_unlock_irqrestore(&queue->rx_cons_lock, flags);
+                dev_alert(dev, "RX producer index going backwards\n");
+                dev_alert(dev, "Disabled for further use\n");
+                queue->info->broken = true;
+                return false;
+        }
+        spin_unlock_irqrestore(&queue->rx_cons_lock, flags);
+
+        if (likely(netif_carrier_ok(queue->info->netdev) && work_queued))
+                napi_schedule(&queue->napi);
+
+        return true;
+}
+
 static irqreturn_t xennet_rx_interrupt(int irq, void *dev_id)
 {
-        struct netfront_queue *queue = dev_id;
-        struct net_device *dev = queue->info->netdev;
+        unsigned int eoiflag = XEN_EOI_FLAG_SPURIOUS;
 
-        if (queue->info->broken)
-                return IRQ_HANDLED;
-
-        if (likely(netif_carrier_ok(dev) &&
-                   RING_HAS_UNCONSUMED_RESPONSES(&queue->rx)))
-                napi_schedule(&queue->napi);
+        if (likely(xennet_handle_rx(dev_id, &eoiflag)))
+                xen_irq_lateeoi(irq, eoiflag);
 
         return IRQ_HANDLED;
 }
 
 static irqreturn_t xennet_interrupt(int irq, void *dev_id)
 {
-        xennet_tx_interrupt(irq, dev_id);
-        xennet_rx_interrupt(irq, dev_id);
+        unsigned int eoiflag = XEN_EOI_FLAG_SPURIOUS;
+
+        if (xennet_handle_tx(dev_id, &eoiflag) &&
+            xennet_handle_rx(dev_id, &eoiflag))
+                xen_irq_lateeoi(irq, eoiflag);
+
         return IRQ_HANDLED;
 }
 
@@ -1527,9 +1588,10 @@ static int setup_netfront_single(struct netfront_queue *queue)
         if (err < 0)
                 goto fail;
 
-        err = bind_evtchn_to_irqhandler(queue->tx_evtchn,
-                                        xennet_interrupt,
-                                        0, queue->info->netdev->name, queue);
+        err = bind_evtchn_to_irqhandler_lateeoi(queue->tx_evtchn,
                                                 xennet_interrupt, 0,
                                                 queue->info->netdev->name,
                                                 queue);
         if (err < 0)
                 goto bind_fail;
         queue->rx_evtchn = queue->tx_evtchn;
@@ -1557,18 +1619,18 @@ static int setup_netfront_split(struct netfront_queue *queue)
 
         snprintf(queue->tx_irq_name, sizeof(queue->tx_irq_name),
                  "%s-tx", queue->name);
-        err = bind_evtchn_to_irqhandler(queue->tx_evtchn,
-                                        xennet_tx_interrupt,
-                                        0, queue->tx_irq_name, queue);
+        err = bind_evtchn_to_irqhandler_lateeoi(queue->tx_evtchn,
+                                                xennet_tx_interrupt, 0,
+                                                queue->tx_irq_name, queue);
         if (err < 0)
                 goto bind_tx_fail;
         queue->tx_irq = err;
 
         snprintf(queue->rx_irq_name, sizeof(queue->rx_irq_name),
                  "%s-rx", queue->name);
-        err = bind_evtchn_to_irqhandler(queue->rx_evtchn,
-                                        xennet_rx_interrupt,
-                                        0, queue->rx_irq_name, queue);
+        err = bind_evtchn_to_irqhandler_lateeoi(queue->rx_evtchn,
                                                 xennet_rx_interrupt, 0,
                                                 queue->rx_irq_name, queue);
         if (err < 0)
                 goto bind_rx_fail;
         queue->rx_irq = err;
@@ -1670,6 +1732,7 @@ static int xennet_init_queue(struct netfront_queue *queue)
 
         spin_lock_init(&queue->tx_lock);
         spin_lock_init(&queue->rx_lock);
+        spin_lock_init(&queue->rx_cons_lock);
 
         setup_timer(&queue->rx_refill_timer, rx_refill_timeout,
                     (unsigned long)queue);
@@ -796,9 +796,6 @@ static int msix_capability_init(struct pci_dev *dev, struct msix_entry *entries,
                 goto out_disable;
         }
 
-        /* Ensure that all table entries are masked. */
-        msix_mask_all(base, tsize);
-
         ret = msix_setup_entries(dev, base, entries, nvec, affd);
         if (ret)
                 goto out_disable;
@@ -821,6 +818,16 @@ static int msix_capability_init(struct pci_dev *dev, struct msix_entry *entries,
         /* Set MSI-X enabled bits and unmask the function */
         pci_intx_for_msi(dev, 0);
         dev->msix_enabled = 1;
+
+        /*
+         * Ensure that all table entries are masked to prevent
+         * stale entries from firing in a crash kernel.
+         *
+         * Done late to deal with a broken Marvell NVME device
+         * which takes the MSI-X mask bits into account even
+         * when MSI-X is disabled, which prevents MSI delivery.
+         */
+        msix_mask_all(base, tsize);
         pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_MASKALL, 0);
 
         pcibios_free_irq(dev);
@@ -847,7 +854,7 @@ out_free:
         free_msi_irqs(dev);
 
 out_disable:
-        pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0);
+        pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_MASKALL | PCI_MSIX_FLAGS_ENABLE, 0);
 
         return ret;
 }
@@ -2181,11 +2181,11 @@ static int resp_mode_select(struct scsi_cmnd *scp,
                             __func__, param_len, res);
         md_len = mselect6 ? (arr[0] + 1) : (get_unaligned_be16(arr + 0) + 2);
         bd_len = mselect6 ? arr[3] : get_unaligned_be16(arr + 6);
-        if (md_len > 2) {
+        off = bd_len + (mselect6 ? 4 : 8);
+        if (md_len > 2 || off >= res) {
                 mk_sense_invalid_fld(scp, SDEB_IN_DATA, 0, -1);
                 return check_condition_result;
         }
-        off = bd_len + (mselect6 ? 4 : 8);
         mpage = arr[off] & 0x3f;
         ps = !!(arr[off] & 0x80);
         if (ps) {
@@ -178,7 +178,7 @@ static struct platform_driver tegra_fuse_driver = {
 };
 builtin_platform_driver(tegra_fuse_driver);
 
-bool __init tegra_fuse_read_spare(unsigned int spare)
+u32 __init tegra_fuse_read_spare(unsigned int spare)
 {
         unsigned int offset = fuse->soc->info->spare + spare * 4;
 
@@ -62,7 +62,7 @@ struct tegra_fuse {
 void tegra_init_revision(void);
 void tegra_init_apbmisc(void);
 
-bool __init tegra_fuse_read_spare(unsigned int spare);
+u32 __init tegra_fuse_read_spare(unsigned int spare);
 u32 __init tegra_fuse_read_early(unsigned int offset);
 
 #ifdef CONFIG_ARCH_TEGRA_2x_SOC
|
@ -50,6 +50,8 @@ struct xencons_info {
|
|||
struct xenbus_device *xbdev;
|
||||
struct xencons_interface *intf;
|
||||
unsigned int evtchn;
|
||||
XENCONS_RING_IDX out_cons;
|
||||
unsigned int out_cons_same;
|
||||
struct hvc_struct *hvc;
|
||||
int irq;
|
||||
int vtermno;
|
||||
|
@ -151,6 +153,8 @@ static int domU_read_console(uint32_t vtermno, char *buf, int len)
|
|||
XENCONS_RING_IDX cons, prod;
|
||||
int recv = 0;
|
||||
struct xencons_info *xencons = vtermno_to_xencons(vtermno);
|
||||
unsigned int eoiflag = 0;
|
||||
|
||||
if (xencons == NULL)
|
||||
return -EINVAL;
|
||||
intf = xencons->intf;
|
||||
|
@ -170,7 +174,27 @@ static int domU_read_console(uint32_t vtermno, char *buf, int len)
|
|||
mb(); /* read ring before consuming */
|
||||
intf->in_cons = cons;
|
||||
|
||||
notify_daemon(xencons);
|
||||
/*
|
||||
* When to mark interrupt having been spurious:
|
||||
* - there was no new data to be read, and
|
||||
* - the backend did not consume some output bytes, and
|
||||
* - the previous round with no read data didn't see consumed bytes
|
||||
* (we might have a race with an interrupt being in flight while
|
||||
* updating xencons->out_cons, so account for that by allowing one
|
||||
* round without any visible reason)
|
||||
*/
|
||||
if (intf->out_cons != xencons->out_cons) {
|
||||
xencons->out_cons = intf->out_cons;
|
||||
xencons->out_cons_same = 0;
|
||||
}
|
||||
if (recv) {
|
||||
notify_daemon(xencons);
|
||||
} else if (xencons->out_cons_same++ > 1) {
|
||||
eoiflag = XEN_EOI_FLAG_SPURIOUS;
|
||||
}
|
||||
|
||||
xen_irq_lateeoi(xencons->irq, eoiflag);
|
||||
|
||||
return recv;
|
||||
}
|
||||
|
||||
|
@ -399,7 +423,7 @@ static int xencons_connect_backend(struct xenbus_device *dev,
|
|||
if (ret)
|
||||
return ret;
|
||||
info->evtchn = evtchn;
|
||||
irq = bind_evtchn_to_irq(evtchn);
|
||||
irq = bind_interdomain_evtchn_to_irq_lateeoi(dev->otherend_id, evtchn);
|
||||
if (irq < 0)
|
||||
return irq;
|
||||
info->irq = irq;
|
||||
|
@ -563,7 +587,7 @@ static int __init xen_hvc_init(void)
|
|||
return r;
|
||||
|
||||
info = vtermno_to_xencons(HVC_COOKIE);
|
||||
info->irq = bind_evtchn_to_irq(info->evtchn);
|
||||
info->irq = bind_evtchn_to_irq_lateeoi(info->evtchn);
|
||||
}
|
||||
if (info->irq < 0)
|
||||
info->irq = 0; /* NO_IRQ */
|
||||
|
|
|
@@ -1636,14 +1636,14 @@ composite_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
         u8                              endp;
 
         if (w_length > USB_COMP_EP0_BUFSIZ) {
-                if (ctrl->bRequestType == USB_DIR_OUT) {
-                        goto done;
-                } else {
+                if (ctrl->bRequestType & USB_DIR_IN) {
                         /* Cast away the const, we are going to overwrite on purpose. */
                         __le16 *temp = (__le16 *)&ctrl->wLength;
 
                         *temp = cpu_to_le16(USB_COMP_EP0_BUFSIZ);
                         w_length = USB_COMP_EP0_BUFSIZ;
+                } else {
+                        goto done;
                 }
         }
 
@@ -345,14 +345,14 @@ static int dbgp_setup(struct usb_gadget *gadget,
         u16 len = 0;
 
         if (length > DBGP_REQ_LEN) {
-                if (ctrl->bRequestType == USB_DIR_OUT) {
-                        return err;
-                } else {
+                if (ctrl->bRequestType & USB_DIR_IN) {
                         /* Cast away the const, we are going to overwrite on purpose. */
                         __le16 *temp = (__le16 *)&ctrl->wLength;
 
                         *temp = cpu_to_le16(DBGP_REQ_LEN);
                         length = DBGP_REQ_LEN;
+                } else {
+                        return err;
                 }
         }
 
@@ -1339,14 +1339,14 @@ gadgetfs_setup (struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
     u16 w_length = le16_to_cpu(ctrl->wLength);

     if (w_length > RBUF_SIZE) {
-        if (ctrl->bRequestType == USB_DIR_OUT) {
-            return value;
-        } else {
+        if (ctrl->bRequestType & USB_DIR_IN) {
             /* Cast away the const, we are going to overwrite on purpose. */
             __le16 *temp = (__le16 *)&ctrl->wLength;

             *temp = cpu_to_le16(RBUF_SIZE);
             w_length = RBUF_SIZE;
+        } else {
+            return value;
         }
     }
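The three gadget hunks above (composite, the dbgp debug gadget, and gadgetfs, going by the function names) repeat one pattern: bRequestType is a bitfield, so the transfer direction must be tested with '& USB_DIR_IN', not compared with '== USB_DIR_OUT', and oversized IN requests are clamped to the buffer while oversized OUT requests are rejected. A minimal userspace sketch of that logic, with made-up constants:

#include <stdint.h>
#include <stdio.h>

#define USB_DIR_IN  0x80  /* bit 7 of bRequestType, as in ch9.h */
#define EP0_BUFSIZ  4096  /* stand-in for USB_COMP_EP0_BUFSIZ etc. */

/*
 * Returns the length to use for the control transfer, or -1 to reject
 * the request outright.
 */
static int clamp_ctrl_length(uint8_t bRequestType, uint16_t wLength)
{
    if (wLength <= EP0_BUFSIZ)
        return wLength;
    if (bRequestType & USB_DIR_IN)
        return EP0_BUFSIZ;  /* IN: we choose how much to send back */
    return -1;              /* OUT: host would send more than fits */
}

int main(void)
{
    /* 0x21 is a host-to-device class request: comparing against
     * USB_DIR_OUT (0x00) would have missed it; the '&' test is right. */
    printf("%d\n", clamp_ctrl_length(0x21, 9000));  /* -1: reject   */
    printf("%d\n", clamp_ctrl_length(0xA1, 9000));  /* 4096: clamp  */
    return 0;
}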
@@ -1222,6 +1222,14 @@ static const struct usb_device_id option_ids[] = {
       .driver_info = NCTRL(2) | RSVD(3) },
     { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1063, 0xff),    /* Telit LN920 (ECM) */
       .driver_info = NCTRL(0) | RSVD(1) },
+    { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1070, 0xff),    /* Telit FN990 (rmnet) */
+      .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) },
+    { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1071, 0xff),    /* Telit FN990 (MBIM) */
+      .driver_info = NCTRL(0) | RSVD(1) },
+    { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1072, 0xff),    /* Telit FN990 (RNDIS) */
+      .driver_info = NCTRL(2) | RSVD(3) },
+    { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1073, 0xff),    /* Telit FN990 (ECM) */
+      .driver_info = NCTRL(0) | RSVD(1) },
     { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910),
       .driver_info = NCTRL(0) | RSVD(1) | RSVD(3) },
     { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910_DUAL_MODEM),
@@ -969,7 +969,7 @@ int fuse_reverse_inval_entry(struct super_block *sb, u64 parent_nodeid,
     if (!parent)
         return -ENOENT;

-    inode_lock(parent);
+    inode_lock_nested(parent, I_MUTEX_PARENT);
     if (!S_ISDIR(parent->i_mode))
         goto unlock;

@@ -955,6 +955,11 @@ hash_delegation_locked(struct nfs4_delegation *dp, struct nfs4_file *fp)
     return 0;
 }

+static bool delegation_hashed(struct nfs4_delegation *dp)
+{
+    return !(list_empty(&dp->dl_perfile));
+}
+
 static bool
 unhash_delegation_locked(struct nfs4_delegation *dp)
 {
@@ -962,7 +967,7 @@ unhash_delegation_locked(struct nfs4_delegation *dp)

     lockdep_assert_held(&state_lock);

-    if (list_empty(&dp->dl_perfile))
+    if (!delegation_hashed(dp))
         return false;

     dp->dl_stid.sc_type = NFS4_CLOSED_DELEG_STID;
@@ -3881,7 +3886,7 @@ static void nfsd4_cb_recall_prepare(struct nfsd4_callback *cb)
      * queued for a lease break. Don't queue it again.
      */
     spin_lock(&state_lock);
-    if (dp->dl_time == 0) {
+    if (delegation_hashed(dp) && dp->dl_time == 0) {
         dp->dl_time = get_seconds();
         list_add_tail(&dp->dl_recall_lru, &nn->del_recall_lru);
     }
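The three nfsd hunks above introduce delegation_hashed() and use it so a delegation that has already been unhashed (torn down) is never queued for recall again, which would race with its destruction. A toy userspace model of the guard; the field names are stand-ins:

#include <stdbool.h>
#include <stdio.h>

/* Minimal stand-in: a node knows whether it is still hashed on a list. */
struct delegation {
    bool on_list;   /* models !list_empty(&dp->dl_perfile) */
    long dl_time;   /* 0 means "not yet queued for recall" */
};

static bool delegation_hashed(const struct delegation *dp)
{
    return dp->on_list;
}

/*
 * Model of the fix: an unhashed delegation must not be queued, even if
 * its dl_time says it never was before.
 */
static bool queue_recall(struct delegation *dp, long now)
{
    if (delegation_hashed(dp) && dp->dl_time == 0) {
        dp->dl_time = now;
        return true;   /* added to the recall list */
    }
    return false;
}

int main(void)
{
    struct delegation live = { true, 0 }, dead = { false, 0 };

    printf("live queued: %d\n", queue_recall(&live, 100)); /* 1 */
    printf("dead queued: %d\n", queue_recall(&dead, 100)); /* 0 */
    return 0;
}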
@@ -686,7 +686,7 @@ static int kauditd_send_queue(struct sock *sk, u32 portid,
 {
     int rc = 0;
     struct sk_buff *skb;
-    static unsigned int failed = 0;
+    unsigned int failed = 0;

     /* NOTE: kauditd_thread takes care of all our locking, we just use
      * the netlink info passed to us (e.g. sk and portid) */
@@ -703,32 +703,30 @@ static int kauditd_send_queue(struct sock *sk, u32 portid,
             continue;
         }

+retry:
         /* grab an extra skb reference in case of error */
         skb_get(skb);
         rc = netlink_unicast(sk, skb, portid, 0);
         if (rc < 0) {
-            /* fatal failure for our queue flush attempt? */
+            /* send failed - try a few times unless fatal error */
             if (++failed >= retry_limit ||
                 rc == -ECONNREFUSED || rc == -EPERM) {
-                /* yes - error processing for the queue */
                 sk = NULL;
                 if (err_hook)
                     (*err_hook)(skb);
-                if (!skb_hook)
-                    goto out;
-                /* keep processing with the skb_hook */
+                if (rc == -EAGAIN)
+                    rc = 0;
+                /* continue to drain the queue */
                 continue;
             } else
-                /* no - requeue to preserve ordering */
-                skb_queue_head(queue, skb);
+                goto retry;
         } else {
-            /* it worked - drop the extra reference and continue */
+            /* skb sent - drop the extra reference and continue */
             consume_skb(skb);
             failed = 0;
         }
     }

-out:
     return (rc >= 0 ? 0 : rc);
 }
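The audit hunks above make the failure counter per-call rather than static, and retry the same skb a bounded number of times instead of requeueing it (which could reorder or stall the queue forever). A compact userspace model of that retry shape, using a fake transport; all names here are invented:

#include <errno.h>
#include <stdio.h>

#define RETRY_LIMIT 5

/* Hypothetical transport: fails twice with -EAGAIN, then succeeds. */
static int fake_send(int *attempts)
{
    return ++(*attempts) <= 2 ? -EAGAIN : 0;
}

/*
 * Bounded retries of one message, with fatal errors giving up at once.
 * 'failed' lives on the stack, so one bad run cannot poison later calls
 * the way the removed 'static' counter could.
 */
static int send_with_retry(void)
{
    int attempts = 0;
    unsigned int failed = 0;
    int rc;

retry:
    rc = fake_send(&attempts);
    if (rc < 0) {
        if (++failed >= RETRY_LIMIT || rc == -ECONNREFUSED || rc == -EPERM)
            return rc;   /* give up on this message */
        goto retry;      /* transient failure - try again */
    }
    return 0;
}

int main(void)
{
    printf("rc=%d\n", send_with_retry());  /* 0 after two retries */
    return 0;
}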
@@ -1518,7 +1516,8 @@ static int __net_init audit_net_init(struct net *net)
         audit_panic("cannot initialize netlink socket in namespace");
         return -ENOMEM;
     }
-    aunet->sk->sk_sndtimeo = MAX_SCHEDULE_TIMEOUT;
+    /* limit the timeout in case auditd is blocked/stopped */
+    aunet->sk->sk_sndtimeo = HZ / 10;

     return 0;
 }
@@ -1235,8 +1235,7 @@ int do_settimeofday64(const struct timespec64 *ts)
     timekeeping_forward_now(tk);

     xt = tk_xtime(tk);
-    ts_delta.tv_sec = ts->tv_sec - xt.tv_sec;
-    ts_delta.tv_nsec = ts->tv_nsec - xt.tv_nsec;
+    ts_delta = timespec64_sub(*ts, xt);

     if (timespec64_compare(&tk->wall_to_monotonic, &ts_delta) > 0) {
         ret = -EINVAL;
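Replacing the open-coded field-by-field subtraction with timespec64_sub() matters because tv_nsec can go negative and must be renormalized before timespec64_compare() sees the result. A userspace equivalent showing the borrow:

#include <stdio.h>
#include <time.h>

#define NSEC_PER_SEC 1000000000L

/*
 * Userspace analogue of timespec64_sub(): normalizes tv_nsec back into
 * [0, 1e9) so comparisons on the result behave correctly.
 */
static struct timespec timespec_sub(struct timespec a, struct timespec b)
{
    struct timespec d = {
        .tv_sec  = a.tv_sec - b.tv_sec,
        .tv_nsec = a.tv_nsec - b.tv_nsec,
    };
    if (d.tv_nsec < 0) {
        d.tv_sec -= 1;
        d.tv_nsec += NSEC_PER_SEC;
    }
    return d;
}

int main(void)
{
    struct timespec a = { 2, 100 }, b = { 1, 200 };
    struct timespec d = timespec_sub(a, b);

    /* 0.999999900s; the naive version would yield 1s and -100ns */
    printf("%ld.%09ld\n", (long)d.tv_sec, d.tv_nsec);
    return 0;
}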
@@ -24,6 +24,7 @@
 #include <linux/jhash.h>
 #include <linux/slab.h>
 #include <linux/sort.h>
+#include <linux/kmemleak.h>

 #include "tracing_map.h"
 #include "trace.h"
@@ -227,6 +228,7 @@ void tracing_map_array_free(struct tracing_map_array *a)
     for (i = 0; i < a->n_pages; i++) {
         if (!a->pages[i])
             break;
+        kmemleak_free(a->pages[i]);
         free_page((unsigned long)a->pages[i]);
     }
@@ -262,6 +264,7 @@ struct tracing_map_array *tracing_map_array_alloc(unsigned int n_elts,
         a->pages[i] = (void *)get_zeroed_page(GFP_KERNEL);
         if (!a->pages[i])
             goto free;
+        kmemleak_alloc(a->pages[i], PAGE_SIZE, 1, GFP_KERNEL);
     }
  out:
     return a;
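The tracing_map hunks pair every get_zeroed_page() with kmemleak_alloc() and every free_page() with kmemleak_free(), so the leak detector neither loses track of these pages nor reports them as false leaks. A toy model of keeping an allocation tracker in sync with the real allocator; the wrapper names are invented:

#include <stdio.h>
#include <stdlib.h>

static int tracked;  /* stand-in for the tracker's object count */

/* Every buffer handed out is registered with the tracker, and
 * unregistered immediately before it is returned to the allocator. */
static void *page_alloc_tracked(size_t size)
{
    void *p = calloc(1, size);

    if (p)
        tracked++;   /* models kmemleak_alloc(p, size, 1, GFP_KERNEL) */
    return p;
}

static void page_free_tracked(void *p)
{
    if (!p)
        return;
    tracked--;       /* models kmemleak_free(p) */
    free(p);
}

int main(void)
{
    void *p = page_alloc_tracked(4096);

    printf("tracked=%d\n", tracked);  /* 1 */
    page_free_tracked(p);
    printf("tracked=%d\n", tracked);  /* 0: nothing left unaccounted */
    return 0;
}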
@@ -1131,7 +1131,7 @@ config LOCKDEP
     bool
     depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
     select STACKTRACE
-    select FRAME_POINTER if !MIPS && !PPC && !ARM_UNWIND && !S390 && !MICROBLAZE && !ARC && !SCORE && !X86
+    select FRAME_POINTER if !MIPS && !PPC && !ARM && !S390 && !MICROBLAZE && !ARC && !SCORE && !X86
     select KALLSYMS
     select KALLSYMS_ALL
@@ -1566,7 +1566,7 @@ config FAULT_INJECTION_STACKTRACE_FILTER
     depends on FAULT_INJECTION_DEBUG_FS && STACKTRACE_SUPPORT
     depends on !X86_64
     select STACKTRACE
-    select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND && !ARC && !SCORE && !X86
+    select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM && !ARC && !SCORE && !X86
     help
       Provide stacktrace filter for fault-injection capabilities
@@ -1575,7 +1575,7 @@ config LATENCYTOP
     depends on DEBUG_KERNEL
     depends on STACKTRACE_SUPPORT
     depends on PROC_FS
-    select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND && !ARC && !X86
+    select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM && !ARC && !X86
     select KALLSYMS
     select KALLSYMS_ALL
     select STACKTRACE
@@ -96,6 +96,7 @@ int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
     u32 size = kattr->test.data_size_in;
     u32 repeat = kattr->test.repeat;
     u32 retval, duration;
+    int hh_len = ETH_HLEN;
     struct sk_buff *skb;
     void *data;
     int ret;
@@ -131,12 +132,22 @@ int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
     skb_reset_network_header(skb);

     if (is_l2)
-        __skb_push(skb, ETH_HLEN);
+        __skb_push(skb, hh_len);
     if (is_direct_pkt_access)
         bpf_compute_data_end(skb);
     retval = bpf_test_run(prog, skb, repeat, &duration);
-    if (!is_l2)
-        __skb_push(skb, ETH_HLEN);
+    if (!is_l2) {
+        if (skb_headroom(skb) < hh_len) {
+            int nhead = HH_DATA_ALIGN(hh_len - skb_headroom(skb));
+
+            if (pskb_expand_head(skb, nhead, 0, GFP_USER)) {
+                kfree_skb(skb);
+                return -ENOMEM;
+            }
+        }
+        memset(__skb_push(skb, hh_len), 0, hh_len);
+    }
+
     size = skb->len;
     /* bpf program can never convert linear skb to non-linear */
     if (WARN_ON_ONCE(skb_is_nonlinear(skb)))
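The test_run hunks stop pushing a link-layer header into headroom that may no longer exist after the BPF program ran (a program can consume it, e.g. via bpf_skb_change_head). A standalone sketch of "grow the buffer when headroom is short, then push a zeroed header", using a toy packet struct instead of sk_buff:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Toy packet buffer: payload starts at 'data'; headroom = data - head. */
struct pkt {
    unsigned char *head, *data;
    size_t len, headroom;
};

/* Reallocate with extra front space if needed, then push a zeroed
 * hh_len-byte header, mirroring the pskb_expand_head() + memset shape. */
static int push_l2_header(struct pkt *p, size_t hh_len)
{
    if (p->headroom < hh_len) {
        size_t grow = hh_len - p->headroom;
        size_t off = (size_t)(p->data - p->head);
        unsigned char *n = malloc(grow + off + p->len);

        if (!n)
            return -1;
        memcpy(n + grow, p->head, off + p->len);
        free(p->head);
        p->head = n;
        p->data = n + grow + off;
        p->headroom += grow;
    }
    p->data -= hh_len;
    p->headroom -= hh_len;
    p->len += hh_len;
    memset(p->data, 0, hh_len);  /* blank header, as in the patch */
    return 0;
}

int main(void)
{
    struct pkt p = { .len = 10 };

    p.head = malloc(10);
    p.data = p.head;             /* no headroom at all */
    memset(p.data, 0xab, p.len);

    printf("push: %d, len now %zu\n", push_l2_header(&p, 14), p.len);
    free(p.head);
    return 0;
}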
@@ -1858,7 +1858,6 @@ static int __net_init sit_init_net(struct net *net)
     return 0;

 err_reg_dev:
-    ipip6_dev_free(sitn->fb_tunnel_dev);
     free_netdev(sitn->fb_tunnel_dev);
 err_alloc_dev:
     return err;
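The sit hunk drops a manual ipip6_dev_free() on the error path because free_netdev() already runs the device's destructor, so the private data was being released twice. A tiny userspace model of the single-owner rule this enforces; everything here is a made-up analogue:

#include <stdio.h>
#include <stdlib.h>

struct netdev {
    void *priv;   /* per-device private data */
};

/* Models free_netdev(): releases priv via the destructor, then dev. */
static void netdev_free(struct netdev *d)
{
    free(d->priv);
    free(d);
}

/*
 * The bug class fixed here: the error path called the private
 * destructor by hand and then the generic free, releasing 'priv' twice.
 * The fix: exactly one call owns the whole teardown.
 */
static int register_failed_path(struct netdev *d)
{
    /* BAD (removed): free(d->priv); netdev_free(d);  -> double free */
    netdev_free(d);
    return -1;
}

int main(void)
{
    struct netdev *d = calloc(1, sizeof(*d));

    d->priv = malloc(64);
    printf("rc=%d\n", register_failed_path(d));
    return 0;
}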
@@ -109,7 +109,7 @@ static void ieee80211_send_addba_request(struct ieee80211_sub_if_data *sdata,
     mgmt->u.action.u.addba_req.start_seq_num =
                     cpu_to_le16(start_seq_num << 4);

-    ieee80211_tx_skb(sdata, skb);
+    ieee80211_tx_skb_tid(sdata, skb, tid);
 }

 void ieee80211_send_bar(struct ieee80211_vif *vif, u8 *ra, u16 tid, u16 ssn)
@@ -1822,6 +1822,11 @@ static int netlink_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
     if (msg->msg_flags&MSG_OOB)
         return -EOPNOTSUPP;

+    if (len == 0) {
+        pr_warn_once("Zero length message leads to an empty skb\n");
+        return -ENODATA;
+    }
+
     err = scm_send(sock, msg, &scm, true);
     if (err < 0)
         return err;
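The netlink hunk rejects zero-length sends up front instead of building an empty skb that trips a warning later during parsing. A sketch of the guard in plain C, with a stand-in send path:

#include <errno.h>
#include <stdio.h>

/* Model of the added check: fail early rather than allocate an empty
 * buffer that downstream code cannot parse. */
static int model_sendmsg(size_t len)
{
    if (len == 0) {
        fprintf(stderr, "Zero length message leads to an empty skb\n");
        return -ENODATA;
    }
    return 0;  /* ... allocate a buffer of 'len' bytes and send ... */
}

int main(void)
{
    printf("rc=%d\n", model_sendmsg(0));   /* -ENODATA */
    printf("rc=%d\n", model_sendmsg(16));  /* 0 */
    return 0;
}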
@@ -666,8 +666,10 @@ static int nfc_genl_dump_devices_done(struct netlink_callback *cb)
 {
     struct class_dev_iter *iter = (struct class_dev_iter *) cb->args[0];

-    nfc_device_iter_exit(iter);
-    kfree(iter);
+    if (iter) {
+        nfc_device_iter_exit(iter);
+        kfree(iter);
+    }

     return 0;
 }
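The nfc hunk makes the dump-done callback tolerate a NULL iterator, since the dump can be torn down before its start callback ever allocated one. A minimal model of NULL-tolerant cleanup:

#include <stdio.h>
#include <stdlib.h>

struct iter { int dummy; };

static void iter_exit(struct iter *it) { (void)it; /* release resources */ }

/* Cleanup must tolerate a NULL cookie: teardown may run before the
 * start callback ever allocated the iterator. */
static int dump_done(struct iter *it)
{
    if (it) {
        iter_exit(it);
        free(it);
    }
    return 0;
}

int main(void)
{
    dump_done(NULL);                            /* early teardown: safe */
    dump_done(calloc(1, sizeof(struct iter)));  /* normal path */
    puts("ok");
    return 0;
}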
@@ -4477,9 +4477,10 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
     }

 out_free_pg_vec:
-    bitmap_free(rx_owner_map);
-    if (pg_vec)
+    if (pg_vec) {
+        bitmap_free(rx_owner_map);
         free_pg_vec(pg_vec, order, req->tp_block_nr);
+    }
 out:
     return err;
 }
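The af_packet hunk scopes bitmap_free(rx_owner_map) under the pg_vec check: when no new ring was allocated, the owner map still belongs to the live ring and must not be freed here. A toy ownership model of that guard; the struct is invented:

#include <stdio.h>
#include <stdlib.h>

/* Toy model: the owner map only belongs to the error path when a ring
 * (pg_vec) was actually allocated alongside it. */
struct ring {
    void *pg_vec;
    void *rx_owner_map;
};

static void error_path_cleanup(struct ring *r)
{
    if (r->pg_vec) {
        free(r->rx_owner_map);
        free(r->pg_vec);
        r->rx_owner_map = NULL;
        r->pg_vec = NULL;
    }
}

int main(void)
{
    struct ring live  = { NULL, malloc(8) };       /* map owned elsewhere */
    struct ring fresh = { malloc(8), malloc(8) };  /* both just allocated */

    error_path_cleanup(&live);   /* must not touch live.rx_owner_map */
    error_path_cleanup(&fresh);  /* frees both */
    printf("live map still owned: %p\n", live.rx_owner_map);
    free(live.rx_owner_map);
    return 0;
}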
@@ -252,7 +252,7 @@ if ($arch eq "x86_64") {
 } elsif ($arch eq "s390" && $bits == 64) {
     if ($cc =~ /-DCC_USING_HOTPATCH/) {
-        $mcount_regex = "^\\s*([0-9a-fA-F]+):\\s*c0 04 00 00 00 00\\s*brcl\\s*0,[0-9a-f]+ <([^\+]*)>\$";
+        $mcount_regex = "^\\s*([0-9a-fA-F]+):\\s*c0 04 00 00 00 00\\s*(bcrl\\s*0,|jgnop\\s*)[0-9a-f]+ <([^\+]*)>\$";
         $mcount_adjust = 0;
     } else {
         $mcount_regex = "^\\s*([0-9a-fA-F]+):\\s*R_390_(PC|PLT)32DBL\\s+_mcount\\+0x2\$";
@@ -4334,6 +4334,24 @@ static struct bpf_test tests[] = {
         .result = ACCEPT,
         .prog_type = BPF_PROG_TYPE_LWT_XMIT,
     },
+    {
+        "make headroom for LWT_XMIT",
+        .insns = {
+        BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+        BPF_MOV64_IMM(BPF_REG_2, 34),
+        BPF_MOV64_IMM(BPF_REG_3, 0),
+        BPF_EMIT_CALL(BPF_FUNC_skb_change_head),
+        /* split for s390 to succeed */
+        BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+        BPF_MOV64_IMM(BPF_REG_2, 42),
+        BPF_MOV64_IMM(BPF_REG_3, 0),
+        BPF_EMIT_CALL(BPF_FUNC_skb_change_head),
+        BPF_MOV64_IMM(BPF_REG_0, 0),
+        BPF_EXIT_INSN(),
+        },
+        .result = ACCEPT,
+        .prog_type = BPF_PROG_TYPE_LWT_XMIT,
+    },
     {
         "invalid access of tc_classid for LWT_IN",
         .insns = {