Mirror of git://git.yoctoproject.org/linux-yocto.git
This is the 5.13.14 stable release
-----BEGIN PGP SIGNATURE-----

iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAmEx3LoACgkQONu9yGCS
aT7k6A//f6bfwNQy1hDjm3bmX3hrKBJuzHKaypLRJcB7q6UMFRxN47lSIdHdESLa
gwtTDOoJOXTL3gR2i8+aOjew0Z8ceyrkiqditC0vnaU4LInV58cDDFA1JekmSn2X
Iv56UYTi/itygs75E0lX9BbE8nUBWBNQ77LFCi3iUKqgT7anxtnn3TJ7pl916cIO
IyOYAyyLnQVmlKzox54Nmz9BGD4Jn6ef2c6sqYePqbszD2sYQI6P9ankwS6p0PgG
6RXqh7b4zJE96XH8LpBAKfd+9vO8yOMeKAVGShLIbYJu/I595ARhj21dOSPw6avt
3QQjb8dyhVf1b9L24IDT+MXrfCvKxY3eAY2tsZRC2NERHhtpUFq7Mn4CeDMHqwQ/
TJkY2p9ZfLD1U+rmCObfY6bjAiUJoDEmP4X08Tq0Md4QX8IgGefvvqIFKTPc0OaC
UXuL5o4noZmgRADUba2dDoR0u3QGPcg8PZ6m6rd6blWijcDeBgE08aKqrTbGGiee
B5osM4KuhWFqp/nGRiAtqGHXxhGl6iz1f+7SVzgvyprwIRRfWNh69VQrG+HeD62Z
b43VaCoTgpbQI1+ekQbudE9RY6RpMYlGiaVlEWm6n2NWgL843hyeoEQY5B+kSSJW
5VCu5hdV1oph6Yeypt8hur6AKhnO6QothdxcH8VALAmVPD06iuY=
=7fp1
-----END PGP SIGNATURE-----

Merge tag 'v5.13.14' into v5.13/standard/base

This is the 5.13.14 stable release

# gpg: Signature made Fri 03 Sep 2021 04:28:42 AM EDT
# gpg:                using RSA key 647F28654894E3BD457199BE38DBBDC86092693E
# gpg: Can't check signature: No public key
commit ae0f6df1a2
@@ -24,10 +24,10 @@ allOf:
 select:
   properties:
     compatible:
-      items:
-        - enum:
-          - sifive,fu540-c000-ccache
-          - sifive,fu740-c000-ccache
+      contains:
+        enum:
+          - sifive,fu540-c000-ccache
+          - sifive,fu740-c000-ccache

   required:
     - compatible

--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 5
 PATCHLEVEL = 13
-SUBLEVEL = 13
+SUBLEVEL = 14
 EXTRAVERSION =
 NAME = Opossums on Parade

@@ -88,6 +88,8 @@ SECTIONS
         CPUIDLE_TEXT
         LOCK_TEXT
         KPROBES_TEXT
+        IRQENTRY_TEXT
+        SOFTIRQENTRY_TEXT
         *(.fixup)
         *(.gnu.warning)
     }
@@ -36,3 +36,7 @@
         };
     };
 };
+
+&tlmm {
+    gpio-reserved-ranges = <85 4>;
+};
@@ -33,8 +33,7 @@
  * EL2.
  */
 .macro __init_el2_timers
-    mrs x0, cnthctl_el2
-    orr x0, x0, #3          // Enable EL1 physical timers
+    mov x0, #3              // Enable EL1 physical timers
     msr cnthctl_el2, x0
     msr cntvoff_el2, xzr    // Clear virtual offset
 .endm
@@ -8,19 +8,4 @@ extern void * memset(void *, int, size_t);
 #define __HAVE_ARCH_MEMCPY
 void * memcpy(void * dest,const void *src,size_t count);

-#define __HAVE_ARCH_STRLEN
-extern size_t strlen(const char *s);
-
-#define __HAVE_ARCH_STRCPY
-extern char *strcpy(char *dest, const char *src);
-
-#define __HAVE_ARCH_STRNCPY
-extern char *strncpy(char *dest, const char *src, size_t count);
-
-#define __HAVE_ARCH_STRCAT
-extern char *strcat(char *dest, const char *src);
-
-#define __HAVE_ARCH_MEMSET
-extern void *memset(void *, int, size_t);
-
 #endif
@@ -17,10 +17,6 @@

 #include <linux/string.h>
 EXPORT_SYMBOL(memset);
-EXPORT_SYMBOL(strlen);
-EXPORT_SYMBOL(strcpy);
-EXPORT_SYMBOL(strncpy);
-EXPORT_SYMBOL(strcat);

 #include <linux/atomic.h>
 EXPORT_SYMBOL(__xchg8);
@@ -3,7 +3,7 @@
 # Makefile for parisc-specific library files
 #

-lib-y   := lusercopy.o bitops.o checksum.o io.o memcpy.o \
-           ucmpdi2.o delay.o string.o
+lib-y   := lusercopy.o bitops.o checksum.o io.o memset.o memcpy.o \
+           ucmpdi2.o delay.o

 obj-y   := iomap.o

--- /dev/null
+++ b/arch/parisc/lib/memset.c
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#include <linux/types.h>
+#include <asm/string.h>
+
+#define OPSIZ (BITS_PER_LONG/8)
+typedef unsigned long op_t;
+
+void *
+memset (void *dstpp, int sc, size_t len)
+{
+  unsigned int c = sc;
+  long int dstp = (long int) dstpp;
+
+  if (len >= 8)
+    {
+      size_t xlen;
+      op_t cccc;
+
+      cccc = (unsigned char) c;
+      cccc |= cccc << 8;
+      cccc |= cccc << 16;
+      if (OPSIZ > 4)
+        /* Do the shift in two steps to avoid warning if long has 32 bits. */
+        cccc |= (cccc << 16) << 16;
+
+      /* There are at least some bytes to set.
+         No need to test for LEN == 0 in this alignment loop. */
+      while (dstp % OPSIZ != 0)
+        {
+          ((unsigned char *) dstp)[0] = c;
+          dstp += 1;
+          len -= 1;
+        }
+
+      /* Write 8 `op_t' per iteration until less than 8 `op_t' remain. */
+      xlen = len / (OPSIZ * 8);
+      while (xlen > 0)
+        {
+          ((op_t *) dstp)[0] = cccc;
+          ((op_t *) dstp)[1] = cccc;
+          ((op_t *) dstp)[2] = cccc;
+          ((op_t *) dstp)[3] = cccc;
+          ((op_t *) dstp)[4] = cccc;
+          ((op_t *) dstp)[5] = cccc;
+          ((op_t *) dstp)[6] = cccc;
+          ((op_t *) dstp)[7] = cccc;
+          dstp += 8 * OPSIZ;
+          xlen -= 1;
+        }
+      len %= OPSIZ * 8;
+
+      /* Write 1 `op_t' per iteration until less than OPSIZ bytes remain. */
+      xlen = len / OPSIZ;
+      while (xlen > 0)
+        {
+          ((op_t *) dstp)[0] = cccc;
+          dstp += OPSIZ;
+          xlen -= 1;
+        }
+      len %= OPSIZ;
+    }
+
+  /* Write the last few bytes. */
+  while (len > 0)
+    {
+      ((unsigned char *) dstp)[0] = c;
+      dstp += 1;
+      len -= 1;
+    }
+
+  return dstpp;
+}
@@ -1,136 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * PA-RISC assembly string functions
- *
- * Copyright (C) 2019 Helge Deller <deller@gmx.de>
- */
-
-#include <asm/assembly.h>
-#include <linux/linkage.h>
-
-    .section .text.hot
-    .level PA_ASM_LEVEL
-
-    t0 = r20
-    t1 = r21
-    t2 = r22
-
-ENTRY_CFI(strlen, frame=0,no_calls)
-    or,COND(<>) arg0,r0,ret0
-    b,l,n   .Lstrlen_null_ptr,r0
-    depwi   0,31,2,ret0
-    cmpb,COND(<>) arg0,ret0,.Lstrlen_not_aligned
-    ldw,ma  4(ret0),t0
-    cmpib,tr 0,r0,.Lstrlen_loop
-    uxor,nbz r0,t0,r0
-.Lstrlen_not_aligned:
-    uaddcm  arg0,ret0,t1
-    shladd  t1,3,r0,t1
-    mtsar   t1
-    depwi   -1,%sar,32,t0
-    uxor,nbz r0,t0,r0
-.Lstrlen_loop:
-    b,l,n   .Lstrlen_end_loop,r0
-    ldw,ma  4(ret0),t0
-    cmpib,tr 0,r0,.Lstrlen_loop
-    uxor,nbz r0,t0,r0
-.Lstrlen_end_loop:
-    extrw,u,<> t0,7,8,r0
-    addib,tr,n -3,ret0,.Lstrlen_out
-    extrw,u,<> t0,15,8,r0
-    addib,tr,n -2,ret0,.Lstrlen_out
-    extrw,u,<> t0,23,8,r0
-    addi    -1,ret0,ret0
-.Lstrlen_out:
-    bv      r0(rp)
-    uaddcm  ret0,arg0,ret0
-.Lstrlen_null_ptr:
-    bv,n    r0(rp)
-ENDPROC_CFI(strlen)
-
-
-ENTRY_CFI(strcpy, frame=0,no_calls)
-    ldb     0(arg1),t0
-    stb     t0,0(arg0)
-    ldo     0(arg0),ret0
-    ldo     1(arg1),t1
-    cmpb,=  r0,t0,2f
-    ldo     1(arg0),t2
-1:  ldb     0(t1),arg1
-    stb     arg1,0(t2)
-    ldo     1(t1),t1
-    cmpb,<> r0,arg1,1b
-    ldo     1(t2),t2
-2:  bv,n    r0(rp)
-ENDPROC_CFI(strcpy)
-
-
-ENTRY_CFI(strncpy, frame=0,no_calls)
-    ldb     0(arg1),t0
-    stb     t0,0(arg0)
-    ldo     1(arg1),t1
-    ldo     0(arg0),ret0
-    cmpb,=  r0,t0,2f
-    ldo     1(arg0),arg1
-1:  ldo     -1(arg2),arg2
-    cmpb,COND(=),n r0,arg2,2f
-    ldb     0(t1),arg0
-    stb     arg0,0(arg1)
-    ldo     1(t1),t1
-    cmpb,<> r0,arg0,1b
-    ldo     1(arg1),arg1
-2:  bv,n    r0(rp)
-ENDPROC_CFI(strncpy)
-
-
-ENTRY_CFI(strcat, frame=0,no_calls)
-    ldb     0(arg0),t0
-    cmpb,=  t0,r0,2f
-    ldo     0(arg0),ret0
-    ldo     1(arg0),arg0
-1:  ldb     0(arg0),t1
-    cmpb,<>,n r0,t1,1b
-    ldo     1(arg0),arg0
-2:  ldb     0(arg1),t2
-    stb     t2,0(arg0)
-    ldo     1(arg0),arg0
-    ldb     0(arg1),t0
-    cmpb,<> r0,t0,2b
-    ldo     1(arg1),arg1
-    bv,n    r0(rp)
-ENDPROC_CFI(strcat)
-
-
-ENTRY_CFI(memset, frame=0,no_calls)
-    copy    arg0,ret0
-    cmpb,COND(=) r0,arg0,4f
-    copy    arg0,t2
-    cmpb,COND(=) r0,arg2,4f
-    ldo     -1(arg2),arg3
-    subi    -1,arg3,t0
-    subi    0,t0,t1
-    cmpiclr,COND(>=) 0,t1,arg2
-    ldo     -1(t1),arg2
-    extru   arg2,31,2,arg0
-2:  stb     arg1,0(t2)
-    ldo     1(t2),t2
-    addib,>= -1,arg0,2b
-    ldo     -1(arg3),arg3
-    cmpiclr,COND(<=) 4,arg2,r0
-    b,l,n   4f,r0
-#ifdef CONFIG_64BIT
-    depd,*  r0,63,2,arg2
-#else
-    depw    r0,31,2,arg2
-#endif
-    ldo     1(t2),t2
-3:  stb     arg1,-1(t2)
-    stb     arg1,0(t2)
-    stb     arg1,1(t2)
-    stb     arg1,2(t2)
-    addib,COND(>) -4,arg2,3b
-    ldo     4(t2),t2
-4:  bv,n    r0(rp)
-ENDPROC_CFI(memset)
-
-    .end
@@ -97,7 +97,7 @@ config PPC_BOOK3S_64
     select PPC_HAVE_PMU_SUPPORT
     select HAVE_ARCH_TRANSPARENT_HUGEPAGE
     select ARCH_ENABLE_HUGEPAGE_MIGRATION if HUGETLB_PAGE && MIGRATION
-    select ARCH_ENABLE_PMD_SPLIT_PTLOCK
+    select ARCH_ENABLE_SPLIT_PMD_PTLOCK
     select ARCH_ENABLE_THP_MIGRATION if TRANSPARENT_HUGEPAGE
     select ARCH_SUPPORTS_HUGETLBFS
     select ARCH_SUPPORTS_NUMA_BALANCING
@@ -10,6 +10,7 @@
 #include <asm/ptrace.h>
 #include <asm/syscall.h>
 #include <asm/thread_info.h>
+#include <asm/switch_to.h>
 #include <linux/audit.h>
 #include <linux/ptrace.h>
 #include <linux/elf.h>
@@ -56,6 +57,9 @@ static int riscv_fpr_get(struct task_struct *target,
 {
     struct __riscv_d_ext_state *fstate = &target->thread.fstate;

+    if (target == current)
+        fstate_save(current, task_pt_regs(current));
+
     membuf_write(&to, fstate, offsetof(struct __riscv_d_ext_state, fcsr));
     membuf_store(&to, fstate->fcsr);
     return membuf_zero(&to, 4); // explicitly pad
@@ -4701,7 +4701,7 @@ static void __snr_uncore_mmio_init_box(struct intel_uncore_box *box,
         return;

     pci_read_config_dword(pdev, SNR_IMC_MMIO_BASE_OFFSET, &pci_dword);
-    addr = (pci_dword & SNR_IMC_MMIO_BASE_MASK) << 23;
+    addr = ((resource_size_t)pci_dword & SNR_IMC_MMIO_BASE_MASK) << 23;

     pci_read_config_dword(pdev, mem_offset, &pci_dword);
     addr |= (pci_dword & SNR_IMC_MMIO_MEM0_MASK) << 12;
@@ -3061,19 +3061,19 @@ static ssize_t ioc_weight_write(struct kernfs_open_file *of, char *buf,
     if (v < CGROUP_WEIGHT_MIN || v > CGROUP_WEIGHT_MAX)
         return -EINVAL;

-    spin_lock(&blkcg->lock);
+    spin_lock_irq(&blkcg->lock);
     iocc->dfl_weight = v * WEIGHT_ONE;
     hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
         struct ioc_gq *iocg = blkg_to_iocg(blkg);

         if (iocg) {
-            spin_lock_irq(&iocg->ioc->lock);
+            spin_lock(&iocg->ioc->lock);
             ioc_now(iocg->ioc, &now);
             weight_updated(iocg, &now);
-            spin_unlock_irq(&iocg->ioc->lock);
+            spin_unlock(&iocg->ioc->lock);
         }
     }
-    spin_unlock(&blkcg->lock);
+    spin_unlock_irq(&blkcg->lock);

     return nbytes;
 }
@@ -923,34 +923,14 @@ static bool blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
     unsigned long *next = priv;

-    /*
-     * Just do a quick check if it is expired before locking the request in
-     * so we're not unnecessarilly synchronizing across CPUs.
-     */
-    if (!blk_mq_req_expired(rq, next))
-        return true;
-
-    /*
-     * We have reason to believe the request may be expired. Take a
-     * reference on the request to lock this request lifetime into its
-     * currently allocated context to prevent it from being reallocated in
-     * the event the completion by-passes this timeout handler.
-     *
-     * If the reference was already released, then the driver beat the
-     * timeout handler to posting a natural completion.
-     */
-    if (!refcount_inc_not_zero(&rq->ref))
-        return true;
-
     /*
-     * The request is now locked and cannot be reallocated underneath the
-     * timeout handler's processing. Re-verify this exact request is truly
-     * expired; if it is not expired, then the request was completed and
-     * reallocated as a new request.
+     * blk_mq_queue_tag_busy_iter() has locked the request, so it cannot
+     * be reallocated underneath the timeout handler's processing, then
+     * the expire check is reliable. If the request is not expired, then
+     * it was completed and reallocated as a new request after returning
+     * from blk_mq_check_expired().
      */
     if (blk_mq_req_expired(rq, next))
         blk_mq_rq_timed_out(rq, reserved);
-
-    blk_mq_put_rq_ref(rq);
     return true;
 }

@@ -4029,23 +4029,23 @@ static int floppy_open(struct block_device *bdev, fmode_t mode)
     if (fdc_state[FDC(drive)].rawcmd == 1)
         fdc_state[FDC(drive)].rawcmd = 2;

-    if (mode & (FMODE_READ|FMODE_WRITE)) {
-        drive_state[drive].last_checked = 0;
-        clear_bit(FD_OPEN_SHOULD_FAIL_BIT, &drive_state[drive].flags);
-        if (bdev_check_media_change(bdev))
-            floppy_revalidate(bdev->bd_disk);
-        if (test_bit(FD_DISK_CHANGED_BIT, &drive_state[drive].flags))
-            goto out;
-        if (test_bit(FD_OPEN_SHOULD_FAIL_BIT, &drive_state[drive].flags))
+    if (!(mode & FMODE_NDELAY)) {
+        if (mode & (FMODE_READ|FMODE_WRITE)) {
+            drive_state[drive].last_checked = 0;
+            clear_bit(FD_OPEN_SHOULD_FAIL_BIT,
+                      &drive_state[drive].flags);
+            if (bdev_check_media_change(bdev))
+                floppy_revalidate(bdev->bd_disk);
+            if (test_bit(FD_DISK_CHANGED_BIT, &drive_state[drive].flags))
+                goto out;
+            if (test_bit(FD_OPEN_SHOULD_FAIL_BIT, &drive_state[drive].flags))
+                goto out;
+        }
+        res = -EROFS;
+        if ((mode & FMODE_WRITE) &&
+            !test_bit(FD_DISK_WRITABLE_BIT, &drive_state[drive].flags))
             goto out;
     }
-
-    res = -EROFS;
-
-    if ((mode & FMODE_WRITE) &&
-        !test_bit(FD_DISK_WRITABLE_BIT, &drive_state[drive].flags))
-        goto out;
-
     mutex_unlock(&open_lock);
     mutex_unlock(&floppy_mutex);
     return 0;
@@ -516,6 +516,7 @@ static const struct dmi_system_id btusb_needs_reset_resume_table[] = {
 #define BTUSB_HW_RESET_ACTIVE	12
 #define BTUSB_TX_WAIT_VND_EVT	13
 #define BTUSB_WAKEUP_DISABLE	14
+#define BTUSB_USE_ALT3_FOR_WBS	15

 struct btusb_data {
     struct hci_dev *hdev;
@@ -1748,16 +1749,20 @@ static void btusb_work(struct work_struct *work)
              * Bluetooth USB spec recommends alt 6 (63 bytes), but
              * many adapters do not support it. Alt 1 appears to
              * work for all adapters that do not have alt 6, and
-             * which work with WBS at all.
+             * which work with WBS at all. Some devices prefer
+             * alt 3 (HCI payload >= 60 Bytes let air packet
+             * data satisfy 60 bytes), requiring
+             * MTU >= 3 (packets) * 25 (size) - 3 (headers) = 72
+             * see also Core spec 5, vol 4, B 2.1.1 & Table 2.1.
              */
-            new_alts = btusb_find_altsetting(data, 6) ? 6 : 1;
-            /* Because mSBC frames do not need to be aligned to the
-             * SCO packet boundary. If support the Alt 3, use the
-             * Alt 3 for HCI payload >= 60 Bytes let air packet
-             * data satisfy 60 bytes.
-             */
-            if (new_alts == 1 && btusb_find_altsetting(data, 3))
+            if (btusb_find_altsetting(data, 6))
+                new_alts = 6;
+            else if (btusb_find_altsetting(data, 3) &&
+                     hdev->sco_mtu >= 72 &&
+                     test_bit(BTUSB_USE_ALT3_FOR_WBS, &data->flags))
                 new_alts = 3;
+            else
+                new_alts = 1;
         }

         if (btusb_switch_alt_setting(hdev, new_alts) < 0)
@@ -4733,6 +4738,7 @@ static int btusb_probe(struct usb_interface *intf,
          * (DEVICE_REMOTE_WAKEUP)
          */
         set_bit(BTUSB_WAKEUP_DISABLE, &data->flags);
+        set_bit(BTUSB_USE_ALT3_FOR_WBS, &data->flags);
     }

     if (!reset)
@@ -187,7 +187,7 @@ static int rcar_usb2_clock_sel_probe(struct platform_device *pdev)
     init.ops = &usb2_clock_sel_clock_ops;
     priv->hw.init = &init;

-    ret = devm_clk_hw_register(NULL, &priv->hw);
+    ret = devm_clk_hw_register(dev, &priv->hw);
     if (ret)
         goto pm_put;

@@ -138,6 +138,7 @@ static const struct of_device_id blacklist[] __initconst = {
     { .compatible = "qcom,qcs404", },
     { .compatible = "qcom,sc7180", },
     { .compatible = "qcom,sdm845", },
+    { .compatible = "qcom,sm8150", },

     { .compatible = "st,stih407", },
     { .compatible = "st,stih410", },
@@ -904,7 +904,7 @@ void amdgpu_acpi_fini(struct amdgpu_device *adev)
  */
 bool amdgpu_acpi_is_s0ix_supported(struct amdgpu_device *adev)
 {
-#if IS_ENABLED(CONFIG_AMD_PMC) && IS_ENABLED(CONFIG_PM_SLEEP)
+#if IS_ENABLED(CONFIG_AMD_PMC) && IS_ENABLED(CONFIG_SUSPEND)
     if (acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0) {
         if (adev->flags & AMD_IS_APU)
             return pm_suspend_target_state == PM_SUSPEND_TO_IDLE;
@@ -2690,12 +2690,11 @@ static void amdgpu_device_delay_enable_gfx_off(struct work_struct *work)
     struct amdgpu_device *adev =
         container_of(work, struct amdgpu_device, gfx.gfx_off_delay_work.work);

-    mutex_lock(&adev->gfx.gfx_off_mutex);
-    if (!adev->gfx.gfx_off_state && !adev->gfx.gfx_off_req_count) {
-        if (!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, true))
-            adev->gfx.gfx_off_state = true;
-    }
-    mutex_unlock(&adev->gfx.gfx_off_mutex);
+    WARN_ON_ONCE(adev->gfx.gfx_off_state);
+    WARN_ON_ONCE(adev->gfx.gfx_off_req_count);
+
+    if (!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, true))
+        adev->gfx.gfx_off_state = true;
 }

 /**
@@ -563,24 +563,38 @@ void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable)

     mutex_lock(&adev->gfx.gfx_off_mutex);

-    if (!enable)
-        adev->gfx.gfx_off_req_count++;
-    else if (adev->gfx.gfx_off_req_count > 0)
+    if (enable) {
+        /* If the count is already 0, it means there's an imbalance bug somewhere.
+         * Note that the bug may be in a different caller than the one which triggers the
+         * WARN_ON_ONCE.
+         */
+        if (WARN_ON_ONCE(adev->gfx.gfx_off_req_count == 0))
+            goto unlock;
+
         adev->gfx.gfx_off_req_count--;

-    if (enable && !adev->gfx.gfx_off_state && !adev->gfx.gfx_off_req_count) {
-        schedule_delayed_work(&adev->gfx.gfx_off_delay_work, GFX_OFF_DELAY_ENABLE);
-    } else if (!enable && adev->gfx.gfx_off_state) {
-        if (!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, false)) {
-            adev->gfx.gfx_off_state = false;
+        if (adev->gfx.gfx_off_req_count == 0 && !adev->gfx.gfx_off_state)
+            schedule_delayed_work(&adev->gfx.gfx_off_delay_work, GFX_OFF_DELAY_ENABLE);
+    } else {
+        if (adev->gfx.gfx_off_req_count == 0) {
+            cancel_delayed_work_sync(&adev->gfx.gfx_off_delay_work);
+
+            if (adev->gfx.gfx_off_state &&
+                !amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, false)) {
+                adev->gfx.gfx_off_state = false;

-            if (adev->gfx.funcs->init_spm_golden) {
-                dev_dbg(adev->dev, "GFXOFF is disabled, re-init SPM golden settings\n");
-                amdgpu_gfx_init_spm_golden(adev);
+                if (adev->gfx.funcs->init_spm_golden) {
+                    dev_dbg(adev->dev,
+                        "GFXOFF is disabled, re-init SPM golden settings\n");
+                    amdgpu_gfx_init_spm_golden(adev);
+                }
             }
         }
+
+        adev->gfx.gfx_off_req_count++;
     }

+unlock:
     mutex_unlock(&adev->gfx.gfx_off_mutex);
 }

@@ -937,11 +937,6 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
         return -EINVAL;
     }

-    /* This assumes only APU display buffers are pinned with (VRAM|GTT).
-     * See function amdgpu_display_supported_domains()
-     */
-    domain = amdgpu_bo_get_preferred_pin_domain(adev, domain);
-
     if (bo->tbo.pin_count) {
         uint32_t mem_type = bo->tbo.mem.mem_type;
         uint32_t mem_flags = bo->tbo.mem.placement;
@@ -966,6 +961,11 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
         return 0;
     }

+    /* This assumes only APU display buffers are pinned with (VRAM|GTT).
+     * See function amdgpu_display_supported_domains()
+     */
+    domain = amdgpu_bo_get_preferred_pin_domain(adev, domain);
+
     if (bo->tbo.base.import_attach)
         dma_buf_pin(bo->tbo.base.import_attach);

@@ -5123,6 +5123,13 @@ static int vega10_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf)
     return size;
 }

+static bool vega10_get_power_profile_mode_quirks(struct pp_hwmgr *hwmgr)
+{
+    struct amdgpu_device *adev = hwmgr->adev;
+
+    return (adev->pdev->device == 0x6860);
+}
+
 static int vega10_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, uint32_t size)
 {
     struct vega10_hwmgr *data = hwmgr->backend;
@@ -5159,9 +5166,15 @@ static int vega10_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, ui
     }

 out:
-    smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetWorkloadMask,
+    if (vega10_get_power_profile_mode_quirks(hwmgr))
+        smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetWorkloadMask,
                         1 << power_profile_mode,
                         NULL);
+    else
+        smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetWorkloadMask,
+                        (!power_profile_mode) ? 0 : 1 << (power_profile_mode - 1),
+                        NULL);
+
     hwmgr->power_profile_mode = power_profile_mode;

     return 0;
@@ -856,8 +856,6 @@ static int compat_drm_wait_vblank(struct file *file, unsigned int cmd,
     req.request.sequence = req32.request.sequence;
     req.request.signal = req32.request.signal;
     err = drm_ioctl_kernel(file, drm_wait_vblank_ioctl, &req, DRM_UNLOCKED);
-    if (err)
-        return err;

     req32.reply.type = req.reply.type;
     req32.reply.sequence = req.reply.sequence;
@@ -866,7 +864,7 @@ static int compat_drm_wait_vblank(struct file *file, unsigned int cmd,
     if (copy_to_user(argp, &req32, sizeof(req32)))
         return -EFAULT;

-    return 0;
+    return err;
 }

 #if defined(CONFIG_X86)
@@ -3833,23 +3833,18 @@ static void intel_dp_check_device_service_irq(struct intel_dp *intel_dp)

 static void intel_dp_check_link_service_irq(struct intel_dp *intel_dp)
 {
-    struct drm_i915_private *i915 = dp_to_i915(intel_dp);
     u8 val;

     if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
         return;

     if (drm_dp_dpcd_readb(&intel_dp->aux,
-                  DP_LINK_SERVICE_IRQ_VECTOR_ESI0, &val) != 1 || !val) {
-        drm_dbg_kms(&i915->drm, "Error in reading link service irq vector\n");
+                  DP_LINK_SERVICE_IRQ_VECTOR_ESI0, &val) != 1 || !val)
         return;
-    }

     if (drm_dp_dpcd_writeb(&intel_dp->aux,
-                   DP_LINK_SERVICE_IRQ_VECTOR_ESI0, val) != 1) {
-        drm_dbg_kms(&i915->drm, "Error in writing link service irq vector\n");
+                   DP_LINK_SERVICE_IRQ_VECTOR_ESI0, val) != 1)
         return;
-    }

     if (val & HDMI_LINK_STATUS_CHANGED)
         intel_dp_handle_hdmi_link_status_change(intel_dp);
@@ -127,6 +127,15 @@ static void intel_timeline_fini(struct rcu_head *rcu)

     i915_vma_put(timeline->hwsp_ggtt);
     i915_active_fini(&timeline->active);
+
+    /*
+     * A small race exists between intel_gt_retire_requests_timeout and
+     * intel_timeline_exit which could result in the syncmap not getting
+     * free'd. Rather than work to hard to seal this race, simply cleanup
+     * the syncmap on fini.
+     */
+    i915_syncmap_free(&timeline->sync);
+
     kfree(timeline);
 }

@@ -2235,6 +2235,33 @@ nv50_disp_atomic_commit_tail(struct drm_atomic_state *state)
         interlock[NV50_DISP_INTERLOCK_CORE] = 0;
     }

+    /* Finish updating head(s)...
+     *
+     * NVD is rather picky about both where window assignments can change,
+     * *and* about certain core and window channel states matching.
+     *
+     * The EFI GOP driver on newer GPUs configures window channels with a
+     * different output format to what we do, and the core channel update
+     * in the assign_windows case above would result in a state mismatch.
+     *
+     * Delay some of the head update until after that point to workaround
+     * the issue. This only affects the initial modeset.
+     *
+     * TODO: handle this better when adding flexible window mapping
+     */
+    for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
+        struct nv50_head_atom *asyh = nv50_head_atom(new_crtc_state);
+        struct nv50_head *head = nv50_head(crtc);
+
+        NV_ATOMIC(drm, "%s: set %04x (clr %04x)\n", crtc->name,
+              asyh->set.mask, asyh->clr.mask);
+
+        if (asyh->set.mask) {
+            nv50_head_flush_set_wndw(head, asyh);
+            interlock[NV50_DISP_INTERLOCK_CORE] = 1;
+        }
+    }
+
     /* Update plane(s). */
     for_each_new_plane_in_state(state, plane, new_plane_state, i) {
         struct nv50_wndw_atom *asyw = nv50_wndw_atom(new_plane_state);
@@ -50,11 +50,8 @@ nv50_head_flush_clr(struct nv50_head *head,
 }

 void
-nv50_head_flush_set(struct nv50_head *head, struct nv50_head_atom *asyh)
+nv50_head_flush_set_wndw(struct nv50_head *head, struct nv50_head_atom *asyh)
 {
-    if (asyh->set.view   ) head->func->view    (head, asyh);
-    if (asyh->set.mode   ) head->func->mode    (head, asyh);
-    if (asyh->set.core   ) head->func->core_set(head, asyh);
     if (asyh->set.olut   ) {
         asyh->olut.offset = nv50_lut_load(&head->olut,
                           asyh->olut.buffer,
@@ -62,6 +59,14 @@ nv50_head_flush_set(struct nv50_head *head, struct nv50_head_atom *asyh)
                           asyh->olut.load);
         head->func->olut_set(head, asyh);
     }
+}
+
+void
+nv50_head_flush_set(struct nv50_head *head, struct nv50_head_atom *asyh)
+{
+    if (asyh->set.view   ) head->func->view    (head, asyh);
+    if (asyh->set.mode   ) head->func->mode    (head, asyh);
+    if (asyh->set.core   ) head->func->core_set(head, asyh);
     if (asyh->set.curs   ) head->func->curs_set(head, asyh);
     if (asyh->set.base   ) head->func->base    (head, asyh);
     if (asyh->set.ovly   ) head->func->ovly    (head, asyh);
@@ -21,6 +21,7 @@ struct nv50_head {

 struct nv50_head *nv50_head_create(struct drm_device *, int index);
 void nv50_head_flush_set(struct nv50_head *head, struct nv50_head_atom *asyh);
+void nv50_head_flush_set_wndw(struct nv50_head *head, struct nv50_head_atom *asyh);
 void nv50_head_flush_clr(struct nv50_head *head,
              struct nv50_head_atom *asyh, bool flush);

@@ -2624,6 +2624,26 @@ nv174_chipset = {
     .dma = { 0x00000001, gv100_dma_new },
 };

+static const struct nvkm_device_chip
+nv177_chipset = {
+    .name = "GA107",
+    .bar = { 0x00000001, tu102_bar_new },
+    .bios = { 0x00000001, nvkm_bios_new },
+    .devinit = { 0x00000001, ga100_devinit_new },
+    .fb = { 0x00000001, ga102_fb_new },
+    .gpio = { 0x00000001, ga102_gpio_new },
+    .i2c = { 0x00000001, gm200_i2c_new },
+    .imem = { 0x00000001, nv50_instmem_new },
+    .mc = { 0x00000001, ga100_mc_new },
+    .mmu = { 0x00000001, tu102_mmu_new },
+    .pci = { 0x00000001, gp100_pci_new },
+    .privring = { 0x00000001, gm200_privring_new },
+    .timer = { 0x00000001, gk20a_timer_new },
+    .top = { 0x00000001, ga100_top_new },
+    .disp = { 0x00000001, ga102_disp_new },
+    .dma = { 0x00000001, gv100_dma_new },
+};
+
 static int
 nvkm_device_event_ctor(struct nvkm_object *object, void *data, u32 size,
                struct nvkm_notify *notify)
@@ -3049,6 +3069,7 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
     case 0x168: device->chip = &nv168_chipset; break;
     case 0x172: device->chip = &nv172_chipset; break;
     case 0x174: device->chip = &nv174_chipset; break;
+    case 0x177: device->chip = &nv177_chipset; break;
     default:
         if (nvkm_boolopt(device->cfgopt, "NvEnableUnsupportedChipsets", false)) {
             switch (device->chipset) {
@@ -440,7 +440,7 @@ nvkm_dp_train(struct nvkm_dp *dp, u32 dataKBps)
     return ret;
 }

-static void
+void
 nvkm_dp_disable(struct nvkm_outp *outp, struct nvkm_ior *ior)
 {
     struct nvkm_dp *dp = nvkm_dp(outp);
@@ -32,6 +32,7 @@ struct nvkm_dp {

 int nvkm_dp_new(struct nvkm_disp *, int index, struct dcb_output *,
         struct nvkm_outp **);
+void nvkm_dp_disable(struct nvkm_outp *, struct nvkm_ior *);

 /* DPCD Receiver Capabilities */
 #define DPCD_RC00_DPCD_REV 0x00000
@@ -22,6 +22,7 @@
  * Authors: Ben Skeggs
  */
 #include "outp.h"
+#include "dp.h"
 #include "ior.h"

 #include <subdev/bios.h>
@@ -257,6 +258,14 @@ nvkm_outp_init_route(struct nvkm_outp *outp)
     if (!ior->arm.head || ior->arm.proto != proto) {
         OUTP_DBG(outp, "no heads (%x %d %d)", ior->arm.head,
              ior->arm.proto, proto);
+
+        /* The EFI GOP driver on Ampere can leave unused DP links routed,
+         * which we don't expect. The DisableLT IED script *should* get
+         * us back to where we need to be.
+         */
+        if (ior->func->route.get && !ior->arm.head && outp->info.type == DCB_OUTPUT_DP)
+            nvkm_dp_disable(outp, ior);
+
         return;
     }

@@ -249,6 +249,9 @@ static int UVERBS_HANDLER(UVERBS_METHOD_REG_DMABUF_MR)(
     mr->uobject = uobj;
     atomic_inc(&pd->usecnt);

+    rdma_restrack_new(&mr->res, RDMA_RESTRACK_MR);
+    rdma_restrack_set_name(&mr->res, NULL);
+    rdma_restrack_add(&mr->res);
     uobj->object = mr;

     uverbs_finalize_uobj_create(attrs, UVERBS_ATTR_REG_DMABUF_MR_HANDLE);
@@ -1691,6 +1691,7 @@ int bnxt_re_create_srq(struct ib_srq *ib_srq,
     if (nq)
         nq->budget++;
     atomic_inc(&rdev->srq_count);
+    spin_lock_init(&srq->lock);

     return 0;

@@ -1406,7 +1406,6 @@ static int bnxt_re_dev_init(struct bnxt_re_dev *rdev, u8 wqe_mode)
     memset(&rattr, 0, sizeof(rattr));
     rc = bnxt_re_register_netdev(rdev);
     if (rc) {
-        rtnl_unlock();
         ibdev_err(&rdev->ibdev,
               "Failed to register with netedev: %#x\n", rc);
         return -EINVAL;
@@ -356,6 +356,7 @@ static int efa_enable_msix(struct efa_dev *dev)
     }

     if (irq_num != msix_vecs) {
+        efa_disable_msix(dev);
         dev_err(&dev->pdev->dev,
             "Allocated %d MSI-X (out of %d requested)\n",
             irq_num, msix_vecs);
@@ -3055,6 +3055,7 @@ static void __sdma_process_event(struct sdma_engine *sde,
 static int _extend_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx)
 {
     int i;
+    struct sdma_desc *descp;

     /* Handle last descriptor */
     if (unlikely((tx->num_desc == (MAX_DESC - 1)))) {
@@ -3075,12 +3076,10 @@ static int _extend_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx)
     if (unlikely(tx->num_desc == MAX_DESC))
         goto enomem;

-    tx->descp = kmalloc_array(
-            MAX_DESC,
-            sizeof(struct sdma_desc),
-            GFP_ATOMIC);
-    if (!tx->descp)
+    descp = kmalloc_array(MAX_DESC, sizeof(struct sdma_desc), GFP_ATOMIC);
+    if (!descp)
         goto enomem;
+    tx->descp = descp;

     /* reserve last descriptor for coalescing */
     tx->desc_limit = MAX_DESC - 1;
@@ -4444,7 +4444,8 @@ static void mlx5r_mp_remove(struct auxiliary_device *adev)
     mutex_lock(&mlx5_ib_multiport_mutex);
     if (mpi->ibdev)
         mlx5_ib_unbind_slave_port(mpi->ibdev, mpi);
-    list_del(&mpi->list);
+    else
+        list_del(&mpi->list);
     mutex_unlock(&mlx5_ib_multiport_mutex);
     kfree(mpi);
 }
@@ -85,7 +85,7 @@ int rxe_mcast_add_grp_elem(struct rxe_dev *rxe, struct rxe_qp *qp,
         goto out;
     }

-    elem = rxe_alloc(&rxe->mc_elem_pool);
+    elem = rxe_alloc_locked(&rxe->mc_elem_pool);
     if (!elem) {
         err = -ENOMEM;
         goto out;
@@ -226,7 +226,7 @@ static int cio2_bridge_connect_sensor(const struct cio2_sensor_config *cfg,
 err_free_swnodes:
     software_node_unregister_nodes(sensor->swnodes);
 err_put_adev:
-    acpi_dev_put(sensor->adev);
+    acpi_dev_put(adev);
     return ret;
 }

@@ -295,8 +295,7 @@ static const struct sdhci_ops sdhci_iproc_bcm2711_ops = {
 };

 static const struct sdhci_pltfm_data sdhci_bcm2711_pltfm_data = {
-    .quirks = SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12 |
-          SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
+    .quirks = SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12,
     .ops = &sdhci_iproc_bcm2711_ops,
 };

@@ -224,8 +224,8 @@ static void esd_usb2_rx_event(struct esd_usb2_net_priv *priv,
     if (id == ESD_EV_CAN_ERROR_EXT) {
         u8 state = msg->msg.rx.data[0];
         u8 ecc = msg->msg.rx.data[1];
-        u8 txerr = msg->msg.rx.data[2];
-        u8 rxerr = msg->msg.rx.data[3];
+        u8 rxerr = msg->msg.rx.data[2];
+        u8 txerr = msg->msg.rx.data[3];

         skb = alloc_can_err_skb(priv->netdev, &cf);
         if (skb == NULL) {
@@ -1473,9 +1473,6 @@ static void hellcreek_setup_gcl(struct hellcreek *hellcreek, int port,
         u16 data;
         u8 gates;

-        cur++;
-        next++;
-
         if (i == schedule->num_entries)
             gates = initial->gate_mask ^
                 cur->gate_mask;
@@ -1504,6 +1501,9 @@ static void hellcreek_setup_gcl(struct hellcreek *hellcreek, int port,
             (initial->gate_mask <<
              TR_GCLCMD_INIT_GATE_STATES_SHIFT);
         hellcreek_write(hellcreek, data, TR_GCLCMD);
+
+        cur++;
+        next++;
     }
 }

@@ -1551,7 +1551,7 @@ static bool hellcreek_schedule_startable(struct hellcreek *hellcreek, int port)
     /* Calculate difference to admin base time */
     base_time_ns = ktime_to_ns(hellcreek_port->current_schedule->base_time);

-    return base_time_ns - current_ns < (s64)8 * NSEC_PER_SEC;
+    return base_time_ns - current_ns < (s64)4 * NSEC_PER_SEC;
 }

 static void hellcreek_start_schedule(struct hellcreek *hellcreek, int port)
@@ -1295,11 +1295,8 @@ mt7530_port_bridge_leave(struct dsa_switch *ds, int port,
         /* Remove this port from the port matrix of the other ports
          * in the same bridge. If the port is disabled, port matrix
          * is kept and not being setup until the port becomes enabled.
-         * And the other port's port matrix cannot be broken when the
-         * other port is still a VLAN-aware port.
          */
-        if (dsa_is_user_port(ds, i) && i != port &&
-           !dsa_port_is_vlan_filtering(dsa_to_port(ds, i))) {
+        if (dsa_is_user_port(ds, i) && i != port) {
             if (dsa_to_port(ds, i)->bridge_dev != bridge)
                 continue;
             if (priv->ports[i].enable)
@@ -677,11 +677,13 @@ static int xge_probe(struct platform_device *pdev)
     ret = register_netdev(ndev);
     if (ret) {
         netdev_err(ndev, "Failed to register netdev\n");
-        goto err;
+        goto err_mdio_remove;
     }

     return 0;

+err_mdio_remove:
+    xge_mdio_remove(ndev);
 err:
     free_netdev(ndev);

@@ -5068,6 +5068,7 @@ static int adap_init0(struct adapter *adap, int vpd_skip)
         ret = -ENOMEM;
         goto bye;
     }
+    bitmap_zero(adap->sge.blocked_fl, adap->sge.egr_sz);
 #endif

     params[0] = FW_PARAM_PFVF(CLIP_START);
@@ -6788,13 +6789,11 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)

     setup_memwin(adapter);
     err = adap_init0(adapter, 0);
-#ifdef CONFIG_DEBUG_FS
-    bitmap_zero(adapter->sge.blocked_fl, adapter->sge.egr_sz);
-#endif
-    setup_memwin_rdma(adapter);
     if (err)
         goto out_unmap_bar;

+    setup_memwin_rdma(adapter);
+
     /* configure SGE_STAT_CFG_A to read WC stats */
     if (!is_t4(adapter->params.chip))
         t4_write_reg(adapter, SGE_STAT_CFG_A, STATSOURCE_T5_V(7) |
@@ -564,9 +564,13 @@ static void hclge_cmd_uninit_regs(struct hclge_hw *hw)

 void hclge_cmd_uninit(struct hclge_dev *hdev)
 {
+    set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
+    /* wait to ensure that the firmware completes the possible left
+     * over commands.
+     */
+    msleep(HCLGE_CMDQ_CLEAR_WAIT_TIME);
     spin_lock_bh(&hdev->hw.cmq.csq.lock);
     spin_lock(&hdev->hw.cmq.crq.lock);
-    set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
     hclge_cmd_uninit_regs(&hdev->hw);
     spin_unlock(&hdev->hw.cmq.crq.lock);
     spin_unlock_bh(&hdev->hw.cmq.csq.lock);
@@ -9,6 +9,7 @@
 #include "hnae3.h"

 #define HCLGE_CMDQ_TX_TIMEOUT		30000
+#define HCLGE_CMDQ_CLEAR_WAIT_TIME	200
 #define HCLGE_DESC_DATA_LEN		6

 struct hclge_dev;
@@ -264,6 +265,9 @@ enum hclge_opcode_type {
     /* Led command */
     HCLGE_OPC_LED_STATUS_CFG	= 0xB000,

+    /* clear hardware resource command */
+    HCLGE_OPC_CLEAR_HW_RESOURCE	= 0x700B,
+
     /* NCL config command */
     HCLGE_OPC_QUERY_NCL_CONFIG	= 0x7011,

@@ -255,21 +255,12 @@ static int hclge_ieee_getpfc(struct hnae3_handle *h, struct ieee_pfc *pfc)
     u64 requests[HNAE3_MAX_TC], indications[HNAE3_MAX_TC];
     struct hclge_vport *vport = hclge_get_vport(h);
     struct hclge_dev *hdev = vport->back;
-    u8 i, j, pfc_map, *prio_tc;
     int ret;
+    u8 i;

     memset(pfc, 0, sizeof(*pfc));
     pfc->pfc_cap = hdev->pfc_max;
-    prio_tc = hdev->tm_info.prio_tc;
-    pfc_map = hdev->tm_info.hw_pfc_map;
-
-    /* Pfc setting is based on TC */
-    for (i = 0; i < hdev->tm_info.num_tc; i++) {
-        for (j = 0; j < HNAE3_MAX_USER_PRIO; j++) {
-            if ((prio_tc[j] == i) && (pfc_map & BIT(i)))
-                pfc->pfc_en |= BIT(j);
-        }
-    }
+    pfc->pfc_en = hdev->tm_info.pfc_en;

     ret = hclge_pfc_tx_stats_get(hdev, requests);
     if (ret)
@@ -2924,12 +2924,12 @@ static void hclge_update_link_status(struct hclge_dev *hdev)
     }

     if (state != hdev->hw.mac.link) {
+        hdev->hw.mac.link = state;
         client->ops->link_status_change(handle, state);
         hclge_config_mac_tnl_int(hdev, state);
         if (rclient && rclient->ops->link_status_change)
             rclient->ops->link_status_change(rhandle, state);

-        hdev->hw.mac.link = state;
         hclge_push_link_status(hdev);
     }

@@ -9869,7 +9869,11 @@ static int hclge_init_vlan_config(struct hclge_dev *hdev)
 static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
                        bool writen_to_tbl)
 {
-    struct hclge_vport_vlan_cfg *vlan;
+    struct hclge_vport_vlan_cfg *vlan, *tmp;
+
+    list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node)
+        if (vlan->vlan_id == vlan_id)
+            return;

     vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
     if (!vlan)
@@ -11167,6 +11171,28 @@ static void hclge_clear_resetting_state(struct hclge_dev *hdev)
     }
 }

+static int hclge_clear_hw_resource(struct hclge_dev *hdev)
+{
+    struct hclge_desc desc;
+    int ret;
+
+    hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CLEAR_HW_RESOURCE, false);
+
+    ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+    /* This new command is only supported by new firmware, it will
+     * fail with older firmware. Error value -EOPNOSUPP can only be
+     * returned by older firmware running this command, to keep code
+     * backward compatible we will override this value and return
+     * success.
+     */
+    if (ret && ret != -EOPNOTSUPP) {
+        dev_err(&hdev->pdev->dev,
+            "failed to clear hw resource, ret = %d\n", ret);
+        return ret;
+    }
+    return 0;
+}
+
 static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
 {
     struct pci_dev *pdev = ae_dev->pdev;
@@ -11204,6 +11230,10 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
     if (ret)
         goto err_cmd_uninit;

+    ret = hclge_clear_hw_resource(hdev);
+    if (ret)
+        goto err_cmd_uninit;
+
     ret = hclge_get_cap(hdev);
     if (ret)
         goto err_cmd_uninit;
@@ -505,12 +505,17 @@ static void hclgevf_cmd_uninit_regs(struct hclgevf_hw *hw)

 void hclgevf_cmd_uninit(struct hclgevf_dev *hdev)
 {
+    set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
+    /* wait to ensure that the firmware completes the possible left
+     * over commands.
+     */
+    msleep(HCLGEVF_CMDQ_CLEAR_WAIT_TIME);
     spin_lock_bh(&hdev->hw.cmq.csq.lock);
     spin_lock(&hdev->hw.cmq.crq.lock);
-    set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
     hclgevf_cmd_uninit_regs(&hdev->hw);
     spin_unlock(&hdev->hw.cmq.crq.lock);
     spin_unlock_bh(&hdev->hw.cmq.csq.lock);

     hclgevf_free_cmd_desc(&hdev->hw.cmq.csq);
     hclgevf_free_cmd_desc(&hdev->hw.cmq.crq);
 }
@@ -8,6 +8,7 @@
 #include "hnae3.h"

 #define HCLGEVF_CMDQ_TX_TIMEOUT		30000
+#define HCLGEVF_CMDQ_CLEAR_WAIT_TIME	200
 #define HCLGEVF_CMDQ_RX_INVLD_B		0
 #define HCLGEVF_CMDQ_RX_OUTVLD_B	1

@@ -498,10 +498,10 @@ void hclgevf_update_link_status(struct hclgevf_dev *hdev, int link_state)
     link_state =
         test_bit(HCLGEVF_STATE_DOWN, &hdev->state) ? 0 : link_state;
     if (link_state != hdev->hw.mac.link) {
+        hdev->hw.mac.link = link_state;
         client->ops->link_status_change(handle, !!link_state);
         if (rclient && rclient->ops->link_status_change)
             rclient->ops->link_status_change(rhandle, !!link_state);
-        hdev->hw.mac.link = link_state;
     }

     clear_bit(HCLGEVF_STATE_LINK_UPDATING, &hdev->state);
@@ -304,8 +304,8 @@ void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev)
             flag = (u8)msg_q[5];

             /* update upper layer with new link link status */
-            hclgevf_update_link_status(hdev, link_status);
             hclgevf_update_speed_duplex(hdev, speed, duplex);
+            hclgevf_update_link_status(hdev, link_status);

             if (flag & HCLGE_MBX_PUSH_LINK_STATUS_EN)
                 set_bit(HCLGEVF_STATE_PF_PUSH_LINK_STATUS,
@@ -1006,6 +1006,8 @@ static s32 e1000_platform_pm_pch_lpt(struct e1000_hw *hw, bool link)
 {
     u32 reg = link << (E1000_LTRV_REQ_SHIFT + E1000_LTRV_NOSNOOP_SHIFT) |
         link << E1000_LTRV_REQ_SHIFT | E1000_LTRV_SEND;
+    u16 max_ltr_enc_d = 0;	/* maximum LTR decoded by platform */
+    u16 lat_enc_d = 0;	/* latency decoded */
     u16 lat_enc = 0;	/* latency encoded */

     if (link) {
@@ -1059,7 +1061,17 @@ static s32 e1000_platform_pm_pch_lpt(struct e1000_hw *hw, bool link)
                  E1000_PCI_LTR_CAP_LPT + 2, &max_nosnoop);
         max_ltr_enc = max_t(u16, max_snoop, max_nosnoop);

-        if (lat_enc > max_ltr_enc)
+        lat_enc_d = (lat_enc & E1000_LTRV_VALUE_MASK) *
+                 (1U << (E1000_LTRV_SCALE_FACTOR *
+                 ((lat_enc & E1000_LTRV_SCALE_MASK)
+                 >> E1000_LTRV_SCALE_SHIFT)));
+
+        max_ltr_enc_d = (max_ltr_enc & E1000_LTRV_VALUE_MASK) *
+                 (1U << (E1000_LTRV_SCALE_FACTOR *
+                 ((max_ltr_enc & E1000_LTRV_SCALE_MASK)
+                 >> E1000_LTRV_SCALE_SHIFT)));
+
+        if (lat_enc_d > max_ltr_enc_d)
             lat_enc = max_ltr_enc;
     }

@@ -4115,13 +4127,17 @@ static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw)
         return ret_val;

     if (!(data & valid_csum_mask)) {
-        data |= valid_csum_mask;
-        ret_val = e1000_write_nvm(hw, word, 1, &data);
-        if (ret_val)
-            return ret_val;
-        ret_val = e1000e_update_nvm_checksum(hw);
-        if (ret_val)
-            return ret_val;
+        e_dbg("NVM Checksum Invalid\n");
+
+        if (hw->mac.type < e1000_pch_cnp) {
+            data |= valid_csum_mask;
+            ret_val = e1000_write_nvm(hw, word, 1, &data);
+            if (ret_val)
+                return ret_val;
+            ret_val = e1000e_update_nvm_checksum(hw);
+            if (ret_val)
+                return ret_val;
+        }
     }

     return e1000e_validate_nvm_checksum_generic(hw);
@@ -274,8 +274,11 @@

 /* Latency Tolerance Reporting */
 #define E1000_LTRV			0x000F8
+#define E1000_LTRV_VALUE_MASK		0x000003FF
 #define E1000_LTRV_SCALE_MAX		5
 #define E1000_LTRV_SCALE_FACTOR		5
+#define E1000_LTRV_SCALE_SHIFT		10
+#define E1000_LTRV_SCALE_MASK		0x00001C00
 #define E1000_LTRV_REQ_SHIFT		15
 #define E1000_LTRV_NOSNOOP_SHIFT	16
 #define E1000_LTRV_SEND			(1 << 30)
@@ -42,7 +42,9 @@ static int ice_info_pba(struct ice_pf *pf, struct ice_info_ctx *ctx)

     status = ice_read_pba_string(hw, (u8 *)ctx->buf, sizeof(ctx->buf));
     if (status)
-        return -EIO;
+        /* We failed to locate the PBA, so just skip this entry */
+        dev_dbg(ice_pf_to_dev(pf), "Failed to read Product Board Assembly string, status %s\n",
+            ice_stat_str(status));

     return 0;
 }
@@ -146,6 +146,9 @@ static void igc_release_hw_control(struct igc_adapter *adapter)
     struct igc_hw *hw = &adapter->hw;
     u32 ctrl_ext;

+    if (!pci_device_is_present(adapter->pdev))
+        return;
+
     /* Let firmware take over control of h/w */
     ctrl_ext = rd32(IGC_CTRL_EXT);
     wr32(IGC_CTRL_EXT,
@@ -4037,26 +4040,29 @@ void igc_down(struct igc_adapter *adapter)

     igc_ptp_suspend(adapter);

-    /* disable receives in the hardware */
-    rctl = rd32(IGC_RCTL);
-    wr32(IGC_RCTL, rctl & ~IGC_RCTL_EN);
-    /* flush and sleep below */
-
+    if (pci_device_is_present(adapter->pdev)) {
+        /* disable receives in the hardware */
+        rctl = rd32(IGC_RCTL);
+        wr32(IGC_RCTL, rctl & ~IGC_RCTL_EN);
+        /* flush and sleep below */
+    }
     /* set trans_start so we don't get spurious watchdogs during reset */
     netif_trans_update(netdev);

     netif_carrier_off(netdev);
     netif_tx_stop_all_queues(netdev);

-    /* disable transmits in the hardware */
-    tctl = rd32(IGC_TCTL);
-    tctl &= ~IGC_TCTL_EN;
-    wr32(IGC_TCTL, tctl);
-    /* flush both disables and wait for them to finish */
-    wrfl();
-    usleep_range(10000, 20000);
+    if (pci_device_is_present(adapter->pdev)) {
+        /* disable transmits in the hardware */
+        tctl = rd32(IGC_TCTL);
+        tctl &= ~IGC_TCTL_EN;
+        wr32(IGC_TCTL, tctl);
+        /* flush both disables and wait for them to finish */
+        wrfl();
+        usleep_range(10000, 20000);

-    igc_irq_disable(adapter);
+        igc_irq_disable(adapter);
+    }

     adapter->flags &= ~IGC_FLAG_NEED_LINK_UPDATE;

@@ -5074,7 +5080,7 @@ static bool validate_schedule(struct igc_adapter *adapter,
         if (e->command != TC_TAPRIO_CMD_SET_GATES)
             return false;

-        for (i = 0; i < IGC_MAX_TX_QUEUES; i++) {
+        for (i = 0; i < adapter->num_tx_queues; i++) {
             if (e->gate_mask & BIT(i))
                 queue_uses[i]++;

@@ -5131,7 +5137,7 @@ static int igc_save_qbv_schedule(struct igc_adapter *adapter,

         end_time += e->interval;

-        for (i = 0; i < IGC_MAX_TX_QUEUES; i++) {
+        for (i = 0; i < adapter->num_tx_queues; i++) {
             struct igc_ring *ring = adapter->tx_ring[i];

             if (!(e->gate_mask & BIT(i)))
@@ -849,7 +849,8 @@ void igc_ptp_suspend(struct igc_adapter *adapter)
     adapter->ptp_tx_skb = NULL;
     clear_bit_unlock(__IGC_PTP_TX_IN_PROGRESS, &adapter->state);

-    igc_ptp_time_save(adapter);
+    if (pci_device_is_present(adapter->pdev))
+        igc_ptp_time_save(adapter);
 }

 /**
@@ -105,7 +105,7 @@
 #define MVNETA_VLAN_PRIO_TO_RXQ			 0x2440
 #define      MVNETA_VLAN_PRIO_RXQ_MAP(prio, rxq) ((rxq) << ((prio) * 3))
 #define MVNETA_PORT_STATUS                       0x2444
-#define      MVNETA_TX_IN_PRGRS                  BIT(1)
+#define      MVNETA_TX_IN_PRGRS                  BIT(0)
 #define      MVNETA_TX_FIFO_EMPTY                BIT(8)
 #define MVNETA_RX_MIN_FRAME_SIZE                 0x247c
 /* Only exists on Armada XP and Armada 370 */
@@ -21,7 +21,7 @@ u32 __ocelot_read_ix(struct ocelot *ocelot, u32 reg, u32 offset)
             ocelot->map[target][reg & REG_MASK] + offset, &val);
     return val;
 }
-EXPORT_SYMBOL(__ocelot_read_ix);
+EXPORT_SYMBOL_GPL(__ocelot_read_ix);

 void __ocelot_write_ix(struct ocelot *ocelot, u32 val, u32 reg, u32 offset)
 {
@@ -32,7 +32,7 @@ void __ocelot_write_ix(struct ocelot *ocelot, u32 val, u32 reg, u32 offset)
     regmap_write(ocelot->targets[target],
              ocelot->map[target][reg & REG_MASK] + offset, val);
 }
-EXPORT_SYMBOL(__ocelot_write_ix);
+EXPORT_SYMBOL_GPL(__ocelot_write_ix);

 void __ocelot_rmw_ix(struct ocelot *ocelot, u32 val, u32 mask, u32 reg,
              u32 offset)
@@ -45,7 +45,7 @@ void __ocelot_rmw_ix(struct ocelot *ocelot, u32 val, u32 mask, u32 reg,
                ocelot->map[target][reg & REG_MASK] + offset,
                mask, val);
 }
-EXPORT_SYMBOL(__ocelot_rmw_ix);
+EXPORT_SYMBOL_GPL(__ocelot_rmw_ix);

 u32 ocelot_port_readl(struct ocelot_port *port, u32 reg)
 {
@@ -58,7 +58,7 @@ u32 ocelot_port_readl(struct ocelot_port *port, u32 reg)
     regmap_read(port->target, ocelot->map[target][reg & REG_MASK], &val);
     return val;
 }
-EXPORT_SYMBOL(ocelot_port_readl);
+EXPORT_SYMBOL_GPL(ocelot_port_readl);

 void ocelot_port_writel(struct ocelot_port *port, u32 val, u32 reg)
 {
@@ -69,7 +69,7 @@ void ocelot_port_writel(struct ocelot_port *port, u32 val, u32 reg)

     regmap_write(port->target, ocelot->map[target][reg & REG_MASK], val);
 }
-EXPORT_SYMBOL(ocelot_port_writel);
+EXPORT_SYMBOL_GPL(ocelot_port_writel);

 void ocelot_port_rmwl(struct ocelot_port *port, u32 val, u32 mask, u32 reg)
 {
@@ -77,7 +77,7 @@ void ocelot_port_rmwl(struct ocelot_port *port, u32 val, u32 mask, u32 reg)

     ocelot_port_writel(port, (cur & (~mask)) | val, reg);
 }
-EXPORT_SYMBOL(ocelot_port_rmwl);
+EXPORT_SYMBOL_GPL(ocelot_port_rmwl);

 u32 __ocelot_target_read_ix(struct ocelot *ocelot, enum ocelot_target target,
                 u32 reg, u32 offset)
@@ -128,7 +128,7 @@ int ocelot_regfields_init(struct ocelot *ocelot,

     return 0;
 }
-EXPORT_SYMBOL(ocelot_regfields_init);
+EXPORT_SYMBOL_GPL(ocelot_regfields_init);

 static struct regmap_config ocelot_regmap_config = {
     .reg_bits = 32,
@@ -148,4 +148,4 @@ struct regmap *ocelot_regmap_init(struct ocelot *ocelot, struct resource *res)

     return devm_regmap_init_mmio(ocelot->dev, regs, &ocelot_regmap_config);
 }
-EXPORT_SYMBOL(ocelot_regmap_init);
+EXPORT_SYMBOL_GPL(ocelot_regmap_init);
@@ -327,6 +327,9 @@ static int qed_ll2_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
     unsigned long flags;
     int rc = -EINVAL;

+    if (!p_ll2_conn)
+        return rc;
+
     spin_lock_irqsave(&p_tx->lock, flags);
     if (p_tx->b_completing_packet) {
         rc = -EBUSY;
@@ -500,7 +503,16 @@ static int qed_ll2_rxq_completion(struct qed_hwfn *p_hwfn, void *cookie)
     unsigned long flags = 0;
     int rc = 0;

+    if (!p_ll2_conn)
+        return rc;
+
     spin_lock_irqsave(&p_rx->lock, flags);
+
+    if (!QED_LL2_RX_REGISTERED(p_ll2_conn)) {
+        spin_unlock_irqrestore(&p_rx->lock, flags);
+        return 0;
+    }
+
     cq_new_idx = le16_to_cpu(*p_rx->p_fw_cons);
     cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);

@@ -821,6 +833,9 @@ static int qed_ll2_lb_rxq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
     struct qed_ll2_info *p_ll2_conn = (struct qed_ll2_info *)p_cookie;
     int rc;

+    if (!p_ll2_conn)
+        return 0;
+
     if (!QED_LL2_RX_REGISTERED(p_ll2_conn))
         return 0;

@@ -844,6 +859,9 @@ static int qed_ll2_lb_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
     u16 new_idx = 0, num_bds = 0;
     int rc;

+    if (!p_ll2_conn)
+        return 0;
+
     if (!QED_LL2_TX_REGISTERED(p_ll2_conn))
         return 0;

@@ -1725,6 +1743,8 @@ int qed_ll2_post_rx_buffer(void *cxt,
     if (!p_ll2_conn)
         return -EINVAL;
     p_rx = &p_ll2_conn->rx_queue;
+    if (!p_rx->set_prod_addr)
+        return -EIO;

     spin_lock_irqsave(&p_rx->lock, flags);
     if (!list_empty(&p_rx->free_descq))
@@ -1285,8 +1285,7 @@ qed_rdma_create_qp(void *rdma_cxt,

     if (!rdma_cxt || !in_params || !out_params ||
         !p_hwfn->p_rdma_info->active) {
-        DP_ERR(p_hwfn->cdev,
-               "qed roce create qp failed due to NULL entry (rdma_cxt=%p, in=%p, out=%p, roce_info=?\n",
+        pr_err("qed roce create qp failed due to NULL entry (rdma_cxt=%p, in=%p, out=%p, roce_info=?\n",
                rdma_cxt, in_params, out_params);
         return NULL;
     }
@@ -4925,6 +4925,10 @@ read_again:

         prefetch(np);

+        /* Ensure a valid XSK buffer before proceed */
+        if (!buf->xdp)
+            break;
+
         if (priv->extend_desc)
             stmmac_rx_extended_status(priv, &priv->dev->stats,
                           &priv->xstats,
@@ -4945,10 +4949,6 @@ read_again:
             continue;
         }

-        /* Ensure a valid XSK buffer before proceed */
-        if (!buf->xdp)
-            break;
-
         /* XSK pool expects RX frame 1:1 mapped to XSK buffer */
         if (likely(status & rx_not_ls)) {
             xsk_buff_free(buf->xdp);
drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
@@ -775,14 +775,18 @@ static int tc_setup_taprio(struct stmmac_priv *priv,
 				   GFP_KERNEL);
 		if (!plat->est)
 			return -ENOMEM;
+
+		mutex_init(&priv->plat->est->lock);
 	} else {
 		memset(plat->est, 0, sizeof(*plat->est));
 	}
 
 	size = qopt->num_entries;
 
+	mutex_lock(&priv->plat->est->lock);
 	priv->plat->est->gcl_size = size;
 	priv->plat->est->enable = qopt->enable;
+	mutex_unlock(&priv->plat->est->lock);
 
 	for (i = 0; i < size; i++) {
 		s64 delta_ns = qopt->entries[i].interval;
@@ -813,6 +817,7 @@ static int tc_setup_taprio(struct stmmac_priv *priv,
 		priv->plat->est->gcl[i] = delta_ns | (gates << wid);
 	}
 
+	mutex_lock(&priv->plat->est->lock);
 	/* Adjust for real system time */
 	priv->ptp_clock_ops.gettime64(&priv->ptp_clock_ops, &current_time);
 	current_time_ns = timespec64_to_ktime(current_time);
@@ -837,8 +842,10 @@ static int tc_setup_taprio(struct stmmac_priv *priv,
 	priv->plat->est->ctr[0] = do_div(ctr, NSEC_PER_SEC);
 	priv->plat->est->ctr[1] = (u32)ctr;
 
-	if (fpe && !priv->dma_cap.fpesel)
+	if (fpe && !priv->dma_cap.fpesel) {
+		mutex_unlock(&priv->plat->est->lock);
 		return -EOPNOTSUPP;
+	}
 
 	/* Actual FPE register configuration will be done after FPE handshake
 	 * is success.
@@ -847,6 +854,7 @@ static int tc_setup_taprio(struct stmmac_priv *priv,
 
 	ret = stmmac_est_configure(priv, priv->ioaddr, priv->plat->est,
 				   priv->plat->clk_ptp_rate);
+	mutex_unlock(&priv->plat->est->lock);
 	if (ret) {
 		netdev_err(priv->dev, "failed to configure EST\n");
 		goto disable;
@@ -862,9 +870,13 @@ static int tc_setup_taprio(struct stmmac_priv *priv,
 	return 0;
 
 disable:
-	priv->plat->est->enable = false;
-	stmmac_est_configure(priv, priv->ioaddr, priv->plat->est,
-			     priv->plat->clk_ptp_rate);
+	if (priv->plat->est) {
+		mutex_lock(&priv->plat->est->lock);
+		priv->plat->est->enable = false;
+		stmmac_est_configure(priv, priv->ioaddr, priv->plat->est,
+				     priv->plat->clk_ptp_rate);
+		mutex_unlock(&priv->plat->est->lock);
+	}
 
 	priv->plat->fpe_cfg->enable = false;
 	stmmac_fpe_configure(priv, priv->ioaddr,
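
The pattern in this hunk generalizes: a shared configuration object gains a mutex, and every early return taken while the mutex is held must drop it first. A minimal userspace sketch of that rule, with a hypothetical est_cfg struct rather than the driver's real types:

#include <errno.h>
#include <pthread.h>

struct est_cfg {
	pthread_mutex_t lock;
	int enable;
	unsigned int gcl_size;
};

static int est_reconfigure(struct est_cfg *cfg, unsigned int size, int cap_ok)
{
	pthread_mutex_lock(&cfg->lock);
	cfg->gcl_size = size;
	cfg->enable = 1;

	if (!cap_ok) {
		/* error path: unlock before bailing out, as the fix above does */
		pthread_mutex_unlock(&cfg->lock);
		return -EOPNOTSUPP;
	}

	pthread_mutex_unlock(&cfg->lock);
	return 0;
}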
drivers/net/ethernet/stmicro/stmmac/stmmac_xdp.c
@@ -34,18 +34,18 @@ static int stmmac_xdp_enable_pool(struct stmmac_priv *priv,
 	need_update = netif_running(priv->dev) && stmmac_xdp_is_enabled(priv);
 
 	if (need_update) {
-		stmmac_disable_rx_queue(priv, queue);
-		stmmac_disable_tx_queue(priv, queue);
 		napi_disable(&ch->rx_napi);
 		napi_disable(&ch->tx_napi);
+		stmmac_disable_rx_queue(priv, queue);
+		stmmac_disable_tx_queue(priv, queue);
 	}
 
 	set_bit(queue, priv->af_xdp_zc_qps);
 
 	if (need_update) {
+		napi_enable(&ch->rxtx_napi);
 		stmmac_enable_rx_queue(priv, queue);
 		stmmac_enable_tx_queue(priv, queue);
-		napi_enable(&ch->rxtx_napi);
 
 		err = stmmac_xsk_wakeup(priv->dev, queue, XDP_WAKEUP_RX);
 		if (err)
@@ -72,10 +72,10 @@ static int stmmac_xdp_disable_pool(struct stmmac_priv *priv, u16 queue)
 	need_update = netif_running(priv->dev) && stmmac_xdp_is_enabled(priv);
 
 	if (need_update) {
+		napi_disable(&ch->rxtx_napi);
 		stmmac_disable_rx_queue(priv, queue);
 		stmmac_disable_tx_queue(priv, queue);
 		synchronize_rcu();
-		napi_disable(&ch->rxtx_napi);
 	}
 
 	xsk_pool_dma_unmap(pool, STMMAC_RX_DMA_ATTR);
@@ -83,10 +83,10 @@ static int stmmac_xdp_disable_pool(struct stmmac_priv *priv, u16 queue)
 	clear_bit(queue, priv->af_xdp_zc_qps);
 
 	if (need_update) {
+		napi_enable(&ch->rx_napi);
+		napi_enable(&ch->tx_napi);
 		stmmac_enable_rx_queue(priv, queue);
 		stmmac_enable_tx_queue(priv, queue);
-		napi_enable(&ch->rx_napi);
-		napi_enable(&ch->tx_napi);
 	}
 
 	return 0;
drivers/net/usb/pegasus.c
@@ -471,7 +471,7 @@ static int enable_net_traffic(struct net_device *dev, struct usb_device *usb)
 		write_mii_word(pegasus, 0, 0x1b, &auxmode);
 	}
 
-	return 0;
+	return ret;
 fail:
 	netif_dbg(pegasus, drv, pegasus->net, "%s failed\n", __func__);
 	return ret;
@@ -860,7 +860,7 @@ static int pegasus_open(struct net_device *net)
 	if (!pegasus->rx_skb)
 		goto exit;
 
-	res = set_registers(pegasus, EthID, 6, net->dev_addr);
+	set_registers(pegasus, EthID, 6, net->dev_addr);
 
 	usb_fill_bulk_urb(pegasus->rx_urb, pegasus->usb,
 			  usb_rcvbulkpipe(pegasus->usb, 1),
drivers/net/wireless/intel/iwlwifi/fw/pnvm.c
@@ -37,6 +37,7 @@ static int iwl_pnvm_handle_section(struct iwl_trans *trans, const u8 *data,
 	u32 sha1 = 0;
 	u16 mac_type = 0, rf_id = 0;
 	u8 *pnvm_data = NULL, *tmp;
+	bool hw_match = false;
 	u32 size = 0;
 	int ret;
 
@@ -83,6 +84,9 @@ static int iwl_pnvm_handle_section(struct iwl_trans *trans, const u8 *data,
 				break;
 			}
 
+			if (hw_match)
+				break;
+
 			mac_type = le16_to_cpup((__le16 *)data);
 			rf_id = le16_to_cpup((__le16 *)(data + sizeof(__le16)));
 
@@ -90,15 +94,9 @@ static int iwl_pnvm_handle_section(struct iwl_trans *trans, const u8 *data,
 				     "Got IWL_UCODE_TLV_HW_TYPE mac_type 0x%0x rf_id 0x%0x\n",
 				     mac_type, rf_id);
 
-			if (mac_type != CSR_HW_REV_TYPE(trans->hw_rev) ||
-			    rf_id != CSR_HW_RFID_TYPE(trans->hw_rf_id)) {
-				IWL_DEBUG_FW(trans,
-					     "HW mismatch, skipping PNVM section, mac_type 0x%0x, rf_id 0x%0x.\n",
-					     CSR_HW_REV_TYPE(trans->hw_rev), trans->hw_rf_id);
-				ret = -ENOENT;
-				goto out;
-			}
-
+			if (mac_type == CSR_HW_REV_TYPE(trans->hw_rev) &&
+			    rf_id == CSR_HW_RFID_TYPE(trans->hw_rf_id))
+				hw_match = true;
 			break;
 		case IWL_UCODE_TLV_SEC_RT: {
 			struct iwl_pnvm_section *section = (void *)data;
@@ -149,6 +147,15 @@ static int iwl_pnvm_handle_section(struct iwl_trans *trans, const u8 *data,
 	}
 
 done:
+	if (!hw_match) {
+		IWL_DEBUG_FW(trans,
+			     "HW mismatch, skipping PNVM section (need mac_type 0x%x rf_id 0x%x)\n",
+			     CSR_HW_REV_TYPE(trans->hw_rev),
+			     CSR_HW_RFID_TYPE(trans->hw_rf_id));
+		ret = -ENOENT;
+		goto out;
+	}
+
 	if (!size) {
 		IWL_DEBUG_FW(trans, "Empty PNVM, skipping.\n");
 		ret = -ENOENT;
drivers/net/wireless/intel/iwlwifi/pcie/drv.c
@@ -1103,12 +1103,80 @@ static const struct iwl_dev_info iwl_dev_info_table[] = {
 		      IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB,
 		      iwl_cfg_bz_a0_mr_a0, iwl_ax211_name),
 
+	/* SoF with JF2 */
+	_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+		      IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
+		      IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF,
+		      IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+		      iwlax210_2ax_cfg_so_jf_b0, iwl9560_160_name),
+	_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+		      IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
+		      IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF,
+		      IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+		      iwlax210_2ax_cfg_so_jf_b0, iwl9560_name),
+
+	/* SoF with JF */
+	_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+		      IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
+		      IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1,
+		      IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+		      iwlax210_2ax_cfg_so_jf_b0, iwl9461_160_name),
+	_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+		      IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
+		      IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV,
+		      IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+		      iwlax210_2ax_cfg_so_jf_b0, iwl9462_160_name),
+	_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+		      IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
+		      IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1,
+		      IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+		      iwlax210_2ax_cfg_so_jf_b0, iwl9461_name),
+	_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+		      IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
+		      IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV,
+		      IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+		      iwlax210_2ax_cfg_so_jf_b0, iwl9462_name),
+
 	/* So with GF */
 	_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
 		      IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY,
 		      IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY,
 		      IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB,
-		      iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_name)
+		      iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_name),
+
+	/* So with JF2 */
+	_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+		      IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY,
+		      IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF,
+		      IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+		      iwlax210_2ax_cfg_so_jf_b0, iwl9560_160_name),
+	_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+		      IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY,
+		      IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF,
+		      IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+		      iwlax210_2ax_cfg_so_jf_b0, iwl9560_name),
+
+	/* So with JF */
+	_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+		      IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY,
+		      IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1,
+		      IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+		      iwlax210_2ax_cfg_so_jf_b0, iwl9461_160_name),
+	_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+		      IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY,
+		      IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV,
+		      IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+		      iwlax210_2ax_cfg_so_jf_b0, iwl9462_160_name),
+	_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+		      IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY,
+		      IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1,
+		      IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+		      iwlax210_2ax_cfg_so_jf_b0, iwl9461_name),
+	_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+		      IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY,
+		      IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV,
+		      IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+		      iwlax210_2ax_cfg_so_jf_b0, iwl9462_name)
 
 #endif /* CONFIG_IWLMVM */
 };
drivers/opp/of.c
@@ -985,8 +985,9 @@ static int _of_add_opp_table_v2(struct device *dev, struct opp_table *opp_table)
 		}
 	}
 
-	/* There should be one of more OPP defined */
-	if (WARN_ON(!count)) {
+	/* There should be one or more OPPs defined */
+	if (!count) {
+		dev_err(dev, "%s: no supported OPPs", __func__);
 		ret = -ENOENT;
 		goto remove_static_opp;
 	}
drivers/platform/x86/Kconfig
@@ -507,6 +507,7 @@ config THINKPAD_ACPI
 	depends on RFKILL || RFKILL = n
 	depends on ACPI_VIDEO || ACPI_VIDEO = n
 	depends on BACKLIGHT_CLASS_DEVICE
+	depends on I2C
 	select ACPI_PLATFORM_PROFILE
 	select HWMON
 	select NVRAM
@@ -701,6 +702,7 @@ config INTEL_HID_EVENT
 	tristate "INTEL HID Event"
 	depends on ACPI
 	depends on INPUT
+	depends on I2C
 	select INPUT_SPARSEKMAP
 	help
 	  This driver provides support for the Intel HID Event hotkey interface.
@@ -752,6 +754,7 @@ config INTEL_VBTN
 	tristate "INTEL VIRTUAL BUTTON"
 	depends on ACPI
 	depends on INPUT
+	depends on I2C
 	select INPUT_SPARSEKMAP
 	help
 	  This driver provides support for the Intel Virtual Button interface.
drivers/platform/x86/asus-nb-wmi.c
@@ -41,6 +41,10 @@ static int wapf = -1;
 module_param(wapf, uint, 0444);
 MODULE_PARM_DESC(wapf, "WAPF value");
 
+static int tablet_mode_sw = -1;
+module_param(tablet_mode_sw, uint, 0444);
+MODULE_PARM_DESC(tablet_mode_sw, "Tablet mode detect: -1:auto 0:disable 1:kbd-dock 2:lid-flip");
+
 static struct quirk_entry *quirks;
 
 static bool asus_q500a_i8042_filter(unsigned char data, unsigned char str,
@@ -458,6 +462,15 @@ static const struct dmi_system_id asus_quirks[] = {
 		},
 		.driver_data = &quirk_asus_use_lid_flip_devid,
 	},
+	{
+		.callback = dmi_matched,
+		.ident = "ASUS TP200s / E205SA",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+			DMI_MATCH(DMI_PRODUCT_NAME, "E205SA"),
+		},
+		.driver_data = &quirk_asus_use_lid_flip_devid,
+	},
 	{},
 };
 
@@ -477,6 +490,21 @@ static void asus_nb_wmi_quirks(struct asus_wmi_driver *driver)
 	else
 		wapf = quirks->wapf;
 
+	switch (tablet_mode_sw) {
+	case 0:
+		quirks->use_kbd_dock_devid = false;
+		quirks->use_lid_flip_devid = false;
+		break;
+	case 1:
+		quirks->use_kbd_dock_devid = true;
+		quirks->use_lid_flip_devid = false;
+		break;
+	case 2:
+		quirks->use_kbd_dock_devid = false;
+		quirks->use_lid_flip_devid = true;
+		break;
+	}
+
 	if (quirks->i8042_filter) {
 		ret = i8042_install_filter(quirks->i8042_filter);
 		if (ret) {
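
The new override can also be exercised directly: assuming the driver is built as a module, passing tablet_mode_sw=2 to modprobe (or asus_nb_wmi.tablet_mode_sw=2 on the kernel command line) forces the lid-flip method, 1 forces the keyboard-dock method, and 0 disables tablet-mode reporting, exactly as the switch statement above maps the values.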
drivers/platform/x86/dual_accel_detect.h
@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Helper code to detect 360 degree hinges (yoga) style 2-in-1 devices using 2 accelerometers
+ * to allow the OS to determine the angle between the display and the base of the device.
+ *
+ * On Windows these are read by a special HingeAngleService process which calls undocumented
+ * ACPI methods, to let the firmware know if the 2-in-1 is in tablet- or laptop-mode.
+ * The firmware may use this to disable the kbd and touchpad to avoid spurious input in
+ * tablet-mode as well as to report SW_TABLET_MODE info to the OS.
+ *
+ * Since Linux does not call these undocumented methods, the SW_TABLET_MODE info reported
+ * by various drivers/platform/x86 drivers is incorrect. These drivers use the detection
+ * code in this file to disable SW_TABLET_MODE reporting to avoid reporting broken info
+ * (instead userspace can derive the status itself by directly reading the 2 accels).
+ */
+
+#include <linux/acpi.h>
+#include <linux/i2c.h>
+
+static int dual_accel_i2c_resource_count(struct acpi_resource *ares, void *data)
+{
+	struct acpi_resource_i2c_serialbus *sb;
+	int *count = data;
+
+	if (i2c_acpi_get_i2c_resource(ares, &sb))
+		*count = *count + 1;
+
+	return 1;
+}
+
+static int dual_accel_i2c_client_count(struct acpi_device *adev)
+{
+	int ret, count = 0;
+	LIST_HEAD(r);
+
+	ret = acpi_dev_get_resources(adev, &r, dual_accel_i2c_resource_count, &count);
+	if (ret < 0)
+		return ret;
+
+	acpi_dev_free_resource_list(&r);
+	return count;
+}
+
+static bool dual_accel_detect_bosc0200(void)
+{
+	struct acpi_device *adev;
+	int count;
+
+	adev = acpi_dev_get_first_match_dev("BOSC0200", NULL, -1);
+	if (!adev)
+		return false;
+
+	count = dual_accel_i2c_client_count(adev);
+
+	acpi_dev_put(adev);
+
+	return count == 2;
+}
+
+static bool dual_accel_detect(void)
+{
+	/* Systems which use a pair of accels with KIOX010A / KIOX020A ACPI ids */
+	if (acpi_dev_present("KIOX010A", NULL, -1) &&
+	    acpi_dev_present("KIOX020A", NULL, -1))
+		return true;
+
+	/* Systems which use a single DUAL250E ACPI device to model 2 accels */
+	if (acpi_dev_present("DUAL250E", NULL, -1))
+		return true;
+
+	/* Systems which use a single BOSC0200 ACPI device to model 2 accels */
+	if (dual_accel_detect_bosc0200())
+		return true;
+
+	return false;
+}
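
The helper is header-only and meant to be included by each driver that needs it. A rough sketch of the expected consumption pattern, modelled on the intel-hid changes further down (the demo_priv driver here is hypothetical):

#include <linux/platform_device.h>
#include <linux/slab.h>
#include "dual_accel_detect.h"

struct demo_priv {
	bool dual_accel;	/* cached once at probe, checked per event */
};

static int demo_probe(struct platform_device *pdev)
{
	struct demo_priv *priv;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	/* probe is the only place the ACPI namespace is scanned */
	priv->dual_accel = dual_accel_detect();
	platform_set_drvdata(pdev, priv);
	return 0;
}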
drivers/platform/x86/gigabyte-wmi.c
@@ -140,6 +140,7 @@ static u8 gigabyte_wmi_detect_sensor_usability(struct wmi_device *wdev)
 	}}
 
 static const struct dmi_system_id gigabyte_wmi_known_working_platforms[] = {
+	DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B450M S2H V2"),
 	DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550 AORUS ELITE"),
 	DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550 AORUS ELITE V2"),
 	DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550 GAMING X V2"),
@@ -147,6 +148,7 @@ static const struct dmi_system_id gigabyte_wmi_known_working_platforms[] = {
 	DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550M DS3H"),
 	DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("Z390 I AORUS PRO WIFI-CF"),
 	DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("X570 AORUS ELITE"),
+	DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("X570 GAMING X"),
 	DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("X570 I AORUS PRO WIFI"),
 	DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("X570 UD"),
 	{ }
drivers/platform/x86/intel-hid.c
@@ -14,6 +14,7 @@
 #include <linux/module.h>
 #include <linux/platform_device.h>
 #include <linux/suspend.h>
+#include "dual_accel_detect.h"
 
 /* When NOT in tablet mode, VGBS returns with the flag 0x40 */
 #define TABLET_MODE_FLAG BIT(6)
@@ -121,6 +122,7 @@ struct intel_hid_priv {
 	struct input_dev *array;
 	struct input_dev *switches;
 	bool wakeup_mode;
+	bool dual_accel;
 };
 
 #define HID_EVENT_FILTER_UUID	"eeec56b3-4442-408f-a792-4edd4d758054"
@@ -450,22 +452,9 @@ static void notify_handler(acpi_handle handle, u32 event, void *context)
 	 * SW_TABLET_MODE report, in these cases we enable support when receiving
 	 * the first event instead of during driver setup.
 	 *
-	 * Some 360 degree hinges (yoga) style 2-in-1 devices use 2 accelerometers
-	 * to allow the OS to determine the angle between the display and the base
-	 * of the device. On Windows these are read by a special HingeAngleService
-	 * process which calls an ACPI DSM (Device Specific Method) on the
-	 * ACPI KIOX010A device node for the sensor in the display, to let the
-	 * firmware know if the 2-in-1 is in tablet- or laptop-mode so that it can
-	 * disable the kbd and touchpad to avoid spurious input in tablet-mode.
-	 *
-	 * The linux kxcjk1013 driver calls the DSM for this once at probe time
-	 * to ensure that the builtin kbd and touchpad work. On some devices this
-	 * causes a "spurious" 0xcd event on the intel-hid ACPI dev. In this case
-	 * there is not a functional tablet-mode switch, so we should not register
-	 * the tablet-mode switch device.
+	 * See dual_accel_detect.h for more info on the dual_accel check.
 	 */
-	if (!priv->switches && (event == 0xcc || event == 0xcd) &&
-	    !acpi_dev_present("KIOX010A", NULL, -1)) {
+	if (!priv->switches && !priv->dual_accel && (event == 0xcc || event == 0xcd)) {
 		dev_info(&device->dev, "switch event received, enable switches supports\n");
 		err = intel_hid_switches_setup(device);
 		if (err)
@@ -606,6 +595,8 @@ static int intel_hid_probe(struct platform_device *device)
 		return -ENOMEM;
 	dev_set_drvdata(&device->dev, priv);
 
+	priv->dual_accel = dual_accel_detect();
+
 	err = intel_hid_input_setup(device);
 	if (err) {
 		pr_err("Failed to setup Intel HID hotkeys\n");
drivers/platform/x86/intel-vbtn.c
@@ -14,6 +14,7 @@
 #include <linux/module.h>
 #include <linux/platform_device.h>
 #include <linux/suspend.h>
+#include "dual_accel_detect.h"
 
 /* Returned when NOT in tablet mode on some HP Stream x360 11 models */
 #define VGBS_TABLET_MODE_FLAG_ALT	0x10
@@ -66,6 +67,7 @@ static const struct key_entry intel_vbtn_switchmap[] = {
 struct intel_vbtn_priv {
 	struct input_dev *buttons_dev;
 	struct input_dev *switches_dev;
+	bool dual_accel;
 	bool has_buttons;
 	bool has_switches;
 	bool wakeup_mode;
@@ -160,6 +162,10 @@ static void notify_handler(acpi_handle handle, u32 event, void *context)
 		input_dev = priv->buttons_dev;
 	} else if ((ke = sparse_keymap_entry_from_scancode(priv->switches_dev, event))) {
 		if (!priv->has_switches) {
+			/* See dual_accel_detect.h for more info */
+			if (priv->dual_accel)
+				return;
+
 			dev_info(&device->dev, "Registering Intel Virtual Switches input-dev after receiving a switch event\n");
 			ret = input_register_device(priv->switches_dev);
 			if (ret)
@@ -248,11 +254,15 @@ static const struct dmi_system_id dmi_switches_allow_list[] = {
 	{} /* Array terminator */
 };
 
-static bool intel_vbtn_has_switches(acpi_handle handle)
+static bool intel_vbtn_has_switches(acpi_handle handle, bool dual_accel)
 {
 	unsigned long long vgbs;
 	acpi_status status;
 
+	/* See dual_accel_detect.h for more info */
+	if (dual_accel)
+		return false;
+
 	if (!dmi_check_system(dmi_switches_allow_list))
 		return false;
 
@@ -263,13 +273,14 @@ static bool intel_vbtn_has_switches(acpi_handle handle)
 static int intel_vbtn_probe(struct platform_device *device)
 {
 	acpi_handle handle = ACPI_HANDLE(&device->dev);
-	bool has_buttons, has_switches;
+	bool dual_accel, has_buttons, has_switches;
 	struct intel_vbtn_priv *priv;
 	acpi_status status;
 	int err;
 
+	dual_accel = dual_accel_detect();
 	has_buttons = acpi_has_method(handle, "VBDL");
-	has_switches = intel_vbtn_has_switches(handle);
+	has_switches = intel_vbtn_has_switches(handle, dual_accel);
 
 	if (!has_buttons && !has_switches) {
 		dev_warn(&device->dev, "failed to read Intel Virtual Button driver\n");
@@ -281,6 +292,7 @@ static int intel_vbtn_probe(struct platform_device *device)
 		return -ENOMEM;
 	dev_set_drvdata(&device->dev, priv);
 
+	priv->dual_accel = dual_accel;
 	priv->has_buttons = has_buttons;
 	priv->has_switches = has_switches;
 
drivers/platform/x86/thinkpad_acpi.c
@@ -73,6 +73,7 @@
 #include <linux/uaccess.h>
 #include <acpi/battery.h>
 #include <acpi/video.h>
+#include "dual_accel_detect.h"
 
 /* ThinkPad CMOS commands */
 #define TP_CMOS_VOLUME_DOWN	0
@@ -3232,7 +3233,7 @@ static int hotkey_init_tablet_mode(void)
 		 * the laptop/tent/tablet mode to the EC. The bmc150 iio driver
 		 * does not support this, so skip the hotkey on these models.
 		 */
-		if (has_tablet_mode && !acpi_dev_present("BOSC0200", "1", -1))
+		if (has_tablet_mode && !dual_accel_detect())
 			tp_features.hotkey_tablet = TP_HOTKEY_TABLET_USES_GMMS;
 		type = "GMMS";
 	} else if (acpi_evalf(hkey_handle, &res, "MHKG", "qd")) {
drivers/scsi/scsi_sysfs.c
@@ -808,12 +808,15 @@ store_state_field(struct device *dev, struct device_attribute *attr,
 	ret = scsi_device_set_state(sdev, state);
 	/*
 	 * If the device state changes to SDEV_RUNNING, we need to
-	 * rescan the device to revalidate it, and run the queue to
-	 * avoid I/O hang.
+	 * run the queue to avoid I/O hang, and rescan the device
+	 * to revalidate it. Running the queue first is necessary
+	 * because another thread may be waiting inside
+	 * blk_mq_freeze_queue_wait() and because that call may be
+	 * waiting for pending I/O to finish.
 	 */
 	if (ret == 0 && state == SDEV_RUNNING) {
-		scsi_rescan_device(dev);
 		blk_mq_run_hw_queues(sdev->request_queue, true);
+		scsi_rescan_device(dev);
 	}
 	mutex_unlock(&sdev->state_mutex);
 
drivers/tty/vt/vt_ioctl.c
@@ -246,6 +246,8 @@ int vt_waitactive(int n)
  *
  * XXX It should at least call into the driver, fbdev's definitely need to
  * restore their engine state. --BenH
+ *
+ * Called with the console lock held.
  */
 static int vt_kdsetmode(struct vc_data *vc, unsigned long mode)
 {
@@ -262,7 +264,6 @@ static int vt_kdsetmode(struct vc_data *vc, unsigned long mode)
 		return -EINVAL;
 	}
 
-	/* FIXME: this needs the console lock extending */
 	if (vc->vc_mode == mode)
 		return 0;
 
@@ -271,12 +272,10 @@ static int vt_kdsetmode(struct vc_data *vc, unsigned long mode)
 		return 0;
 
 	/* explicitly blank/unblank the screen if switching modes */
-	console_lock();
 	if (mode == KD_TEXT)
 		do_unblank_screen(1);
 	else
 		do_blank_screen(1);
-	console_unlock();
 
 	return 0;
 }
@@ -378,7 +377,10 @@ static int vt_k_ioctl(struct tty_struct *tty, unsigned int cmd,
 		if (!perm)
 			return -EPERM;
 
-		return vt_kdsetmode(vc, arg);
+		console_lock();
+		ret = vt_kdsetmode(vc, arg);
+		console_unlock();
+		return ret;
 
 	case KDGETMODE:
 		return put_user(vc->vc_mode, (int __user *)arg);
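
The shape of this fix is the common "caller holds the lock" convention: the helper stops taking the lock itself and documents that its caller must, so the mode check and the blank/unblank become one critical section. A compilable userspace sketch of the same contract, with invented names:

#include <pthread.h>

static pthread_mutex_t demo_lock = PTHREAD_MUTEX_INITIALIZER;

struct demo_console {
	unsigned long mode;
};

/* Called with demo_lock held, mirroring vt_kdsetmode()'s new contract. */
static int demo_setmode_locked(struct demo_console *c, unsigned long mode)
{
	if (c->mode == mode)	/* check and update share one critical section */
		return 0;
	c->mode = mode;
	return 0;
}

static int demo_setmode(struct demo_console *c, unsigned long mode)
{
	int ret;

	pthread_mutex_lock(&demo_lock);
	ret = demo_setmode_locked(c, mode);
	pthread_mutex_unlock(&demo_lock);
	return ret;
}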
drivers/usb/dwc3/gadget.c
@@ -940,19 +940,19 @@ static struct dwc3_trb *dwc3_ep_prev_trb(struct dwc3_ep *dep, u8 index)
 
 static u32 dwc3_calc_trbs_left(struct dwc3_ep *dep)
 {
-	struct dwc3_trb		*tmp;
 	u8			trbs_left;
 
 	/*
-	 * If enqueue & dequeue are equal than it is either full or empty.
-	 *
-	 * One way to know for sure is if the TRB right before us has HWO bit
-	 * set or not. If it has, then we're definitely full and can't fit any
-	 * more transfers in our ring.
+	 * If the enqueue & dequeue are equal then the TRB ring is either full
+	 * or empty. It's considered full when there are DWC3_TRB_NUM-1 of TRBs
+	 * pending to be processed by the driver.
 	 */
 	if (dep->trb_enqueue == dep->trb_dequeue) {
-		tmp = dwc3_ep_prev_trb(dep, dep->trb_enqueue);
-		if (tmp->ctrl & DWC3_TRB_CTRL_HWO)
+		/*
+		 * If there is any request remained in the started_list at
+		 * this point, that means there is no TRB available.
+		 */
+		if (!list_empty(&dep->started_list))
 			return 0;
 
 		return DWC3_TRB_NUM - 1;
@@ -2243,10 +2243,8 @@ static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
 
 		ret = wait_for_completion_timeout(&dwc->ep0_in_setup,
 						  msecs_to_jiffies(DWC3_PULL_UP_TIMEOUT));
-		if (ret == 0) {
-			dev_err(dwc->dev, "timed out waiting for SETUP phase\n");
-			return -ETIMEDOUT;
-		}
+		if (ret == 0)
+			dev_warn(dwc->dev, "timed out waiting for SETUP phase\n");
 	}
 
 	/*
@@ -2458,6 +2456,7 @@ static int __dwc3_gadget_start(struct dwc3 *dwc)
 	/* begin to receive SETUP packets */
 	dwc->ep0state = EP0_SETUP_PHASE;
 	dwc->link_state = DWC3_LINK_STATE_SS_DIS;
+	dwc->delayed_status = false;
 	dwc3_ep0_out_start(dwc);
 
 	dwc3_gadget_enable_irq(dwc);
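
The rewritten comment describes the classic ring ambiguity: enqueue == dequeue can mean either empty or full, so a second signal is required. The driver now uses "are any started requests outstanding?"; the toy ring below expresses the same disambiguation with an explicit pending count (illustrative only, not the dwc3 data structures):

#include <stdbool.h>

#define RING_SIZE 256u	/* power of two, so masking works */

struct ring {
	unsigned int enq;
	unsigned int deq;
	unsigned int pending;	/* entries owned by hardware */
};

static unsigned int ring_slots_left(const struct ring *r)
{
	if (r->enq == r->deq)
		return r->pending ? 0 : RING_SIZE - 1;

	/* free slots between enqueue and dequeue, keeping one gap */
	return (r->deq - r->enq - 1) & (RING_SIZE - 1);
}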
drivers/usb/gadget/function/u_audio.c
@@ -312,8 +312,6 @@ static inline void free_ep(struct uac_rtd_params *prm, struct usb_ep *ep)
 	if (!prm->ep_enabled)
 		return;
 
-	prm->ep_enabled = false;
-
 	audio_dev = uac->audio_dev;
 	params = &audio_dev->params;
 
@@ -331,11 +329,12 @@ static inline void free_ep(struct uac_rtd_params *prm, struct usb_ep *ep)
 		}
 	}
 
+	prm->ep_enabled = false;
+
 	if (usb_ep_disable(ep))
 		dev_err(uac->card->dev, "%s:%d Error!\n", __func__, __LINE__);
 }
 
-
 int u_audio_start_capture(struct g_audio *audio_dev)
 {
 	struct snd_uac_chip *uac = audio_dev->uac;
drivers/usb/host/xhci-pci-renesas.c
@@ -207,7 +207,8 @@ static int renesas_check_rom_state(struct pci_dev *pdev)
 			return 0;
 
 	case RENESAS_ROM_STATUS_NO_RESULT: /* No result yet */
-		return 0;
+		dev_dbg(&pdev->dev, "Unknown ROM status ...\n");
+		return -ENOENT;
 
 	case RENESAS_ROM_STATUS_ERROR: /* Error State */
 	default: /* All other states are marked as "Reserved states" */
@@ -224,14 +225,6 @@ static int renesas_fw_check_running(struct pci_dev *pdev)
 	u8 fw_state;
 	int err;
 
-	/* Check if device has ROM and loaded, if so skip everything */
-	err = renesas_check_rom(pdev);
-	if (err) { /* we have rom */
-		err = renesas_check_rom_state(pdev);
-		if (!err)
-			return err;
-	}
-
 	/*
 	 * Test if the device is actually needing the firmware. As most
 	 * BIOSes will initialize the device for us. If the device is
@@ -591,21 +584,39 @@ int renesas_xhci_check_request_fw(struct pci_dev *pdev,
 		(struct xhci_driver_data *)id->driver_data;
 	const char *fw_name = driver_data->firmware;
 	const struct firmware *fw;
+	bool has_rom;
 	int err;
 
+	/* Check if device has ROM and loaded, if so skip everything */
+	has_rom = renesas_check_rom(pdev);
+	if (has_rom) {
+		err = renesas_check_rom_state(pdev);
+		if (!err)
+			return 0;
+		else if (err != -ENOENT)
+			has_rom = false;
+	}
+
 	err = renesas_fw_check_running(pdev);
 	/* Continue ahead, if the firmware is already running. */
 	if (err == 0)
 		return 0;
 
 	/* no firmware interface available */
 	if (err != 1)
-		return err;
+		return has_rom ? 0 : err;
 
 	pci_dev_get(pdev);
-	err = request_firmware(&fw, fw_name, &pdev->dev);
+	err = firmware_request_nowarn(&fw, fw_name, &pdev->dev);
 	pci_dev_put(pdev);
 	if (err) {
-		dev_err(&pdev->dev, "request_firmware failed: %d\n", err);
+		if (has_rom) {
+			dev_info(&pdev->dev, "failed to load firmware %s, fallback to ROM\n",
+				 fw_name);
+			return 0;
+		}
+		dev_err(&pdev->dev, "failed to load firmware %s: %d\n",
			fw_name, err);
 		return err;
 	}
|
|||
.owner = THIS_MODULE,
|
||||
.name = "ch341-uart",
|
||||
},
|
||||
.bulk_in_size = 512,
|
||||
.id_table = id_table,
|
||||
.num_ports = 1,
|
||||
.open = ch341_open,
|
||||
|
|
|
drivers/usb/serial/option.c
@@ -2074,6 +2074,8 @@ static const struct usb_device_id option_ids[] = {
 	  .driver_info = RSVD(4) | RSVD(5) },
 	{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0105, 0xff),	/* Fibocom NL678 series */
 	  .driver_info = RSVD(6) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(0x2cb7, 0x010b, 0xff, 0xff, 0x30) },	/* Fibocom FG150 Diag */
+	{ USB_DEVICE_AND_INTERFACE_INFO(0x2cb7, 0x010b, 0xff, 0, 0) },		/* Fibocom FG150 AT */
 	{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a0, 0xff) },			/* Fibocom NL668-AM/NL652-EU (laptop MBIM) */
 	{ USB_DEVICE_INTERFACE_CLASS(0x2df3, 0x9d03, 0xff) },			/* LongSung M5710 */
 	{ USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1404, 0xff) },			/* GosunCn GM500 RNDIS */
drivers/usb/typec/tcpm/tcpm.c
@@ -341,6 +341,7 @@ struct tcpm_port {
 	bool vbus_source;
 	bool vbus_charge;
 
+	/* Set to true when Discover_Identity Command is expected to be sent in Ready states. */
 	bool send_discover;
 	bool op_vsafe5v;
 
@@ -370,6 +371,7 @@ struct tcpm_port {
 	struct hrtimer send_discover_timer;
 	struct kthread_work send_discover_work;
 	bool state_machine_running;
+	/* Set to true when VDM State Machine has following actions. */
 	bool vdm_sm_running;
 
 	struct completion tx_complete;
@@ -1403,6 +1405,7 @@ static void tcpm_queue_vdm(struct tcpm_port *port, const u32 header,
 	/* Set ready, vdm state machine will actually send */
 	port->vdm_retries = 0;
 	port->vdm_state = VDM_STATE_READY;
+	port->vdm_sm_running = true;
 
 	mod_vdm_delayed_work(port, 0);
 }
@@ -1645,7 +1648,6 @@ static int tcpm_pd_svdm(struct tcpm_port *port, struct typec_altmode *adev,
 			rlen = 1;
 		} else {
 			tcpm_register_partner_altmodes(port);
-			port->vdm_sm_running = false;
 		}
 		break;
 	case CMD_ENTER_MODE:
@@ -1693,14 +1695,12 @@ static int tcpm_pd_svdm(struct tcpm_port *port, struct typec_altmode *adev,
 				      (VDO_SVDM_VERS(svdm_version));
 			break;
 		}
-		port->vdm_sm_running = false;
 		break;
 	default:
 		response[0] = p[0] | VDO_CMDT(CMDT_RSP_NAK);
 		rlen = 1;
 		response[0] = (response[0] & ~VDO_SVDM_VERS_MASK) |
 			      (VDO_SVDM_VERS(svdm_version));
-		port->vdm_sm_running = false;
 		break;
 	}
 
@@ -1741,6 +1741,20 @@ static void tcpm_handle_vdm_request(struct tcpm_port *port,
 	}
 
 	if (PD_VDO_SVDM(p[0]) && (adev || tcpm_vdm_ams(port) || port->nr_snk_vdo)) {
+		/*
+		 * Here a SVDM is received (INIT or RSP or unknown). Set the vdm_sm_running in
+		 * advance because we are dropping the lock but may send VDMs soon.
+		 * For the cases of INIT received:
+		 *  - If no response to send, it will be cleared later in this function.
+		 *  - If there are responses to send, it will be cleared in the state machine.
+		 * For the cases of RSP received:
+		 *  - If no further INIT to send, it will be cleared later in this function.
+		 *  - Otherwise, it will be cleared in the state machine if timeout or it will go
+		 *    back here until no further INIT to send.
+		 * For the cases of unknown type received:
+		 *  - We will send NAK and the flag will be cleared in the state machine.
+		 */
+		port->vdm_sm_running = true;
 		rlen = tcpm_pd_svdm(port, adev, p, cnt, response, &adev_action);
 	} else {
 		if (port->negotiated_rev >= PD_REV30)
@@ -1809,6 +1823,8 @@ static void tcpm_handle_vdm_request(struct tcpm_port *port,
 
 	if (rlen > 0)
 		tcpm_queue_vdm(port, response[0], &response[1], rlen - 1);
+	else
+		port->vdm_sm_running = false;
 }
 
 static void tcpm_send_vdm(struct tcpm_port *port, u32 vid, int cmd,
@@ -1874,8 +1890,10 @@ static void vdm_run_state_machine(struct tcpm_port *port)
 		 * if there's traffic or we're not in PDO ready state don't send
 		 * a VDM.
 		 */
-		if (port->state != SRC_READY && port->state != SNK_READY)
+		if (port->state != SRC_READY && port->state != SNK_READY) {
+			port->vdm_sm_running = false;
 			break;
+		}
 
 		/* TODO: AMS operation for Unstructured VDM */
 		if (PD_VDO_SVDM(vdo_hdr) && PD_VDO_CMDT(vdo_hdr) == CMDT_INIT) {
@@ -2528,10 +2546,6 @@ static void tcpm_pd_ctrl_request(struct tcpm_port *port,
 						       TYPEC_PWR_MODE_PD,
 						       port->pps_data.active,
 						       port->supply_voltage);
-			/* Set VDM running flag ASAP */
-			if (port->data_role == TYPEC_HOST &&
-			    port->send_discover)
-				port->vdm_sm_running = true;
 			tcpm_set_state(port, SNK_READY, 0);
 		} else {
 			/*
@@ -2569,14 +2583,10 @@ static void tcpm_pd_ctrl_request(struct tcpm_port *port,
 		switch (port->state) {
 		case SNK_NEGOTIATE_CAPABILITIES:
 			/* USB PD specification, Figure 8-43 */
-			if (port->explicit_contract) {
+			if (port->explicit_contract)
 				next_state = SNK_READY;
-				if (port->data_role == TYPEC_HOST &&
-				    port->send_discover)
-					port->vdm_sm_running = true;
-			} else {
+			else
 				next_state = SNK_WAIT_CAPABILITIES;
-			}
 
 			/* Threshold was relaxed before sending Request. Restore it back. */
 			tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_PD,
@@ -2591,10 +2601,6 @@ static void tcpm_pd_ctrl_request(struct tcpm_port *port,
 			port->pps_status = (type == PD_CTRL_WAIT ?
 					    -EAGAIN : -EOPNOTSUPP);
 
-			if (port->data_role == TYPEC_HOST &&
-			    port->send_discover)
-				port->vdm_sm_running = true;
-
 			/* Threshold was relaxed before sending Request. Restore it back. */
 			tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_PD,
 							       port->pps_data.active,
@@ -2670,10 +2676,6 @@ static void tcpm_pd_ctrl_request(struct tcpm_port *port,
 			}
 			break;
 		case DR_SWAP_SEND:
-			if (port->data_role == TYPEC_DEVICE &&
-			    port->send_discover)
-				port->vdm_sm_running = true;
-
 			tcpm_set_state(port, DR_SWAP_CHANGE_DR, 0);
 			break;
 		case PR_SWAP_SEND:
@@ -2711,7 +2713,7 @@ static void tcpm_pd_ctrl_request(struct tcpm_port *port,
 							   PD_MSG_CTRL_NOT_SUPP,
 							   NONE_AMS);
 		} else {
-			if (port->vdm_sm_running) {
+			if (port->send_discover) {
 				tcpm_queue_message(port, PD_MSG_CTRL_WAIT);
 				break;
 			}
@@ -2727,7 +2729,7 @@ static void tcpm_pd_ctrl_request(struct tcpm_port *port,
 							   PD_MSG_CTRL_NOT_SUPP,
 							   NONE_AMS);
 		} else {
-			if (port->vdm_sm_running) {
+			if (port->send_discover) {
 				tcpm_queue_message(port, PD_MSG_CTRL_WAIT);
 				break;
 			}
@@ -2736,7 +2738,7 @@ static void tcpm_pd_ctrl_request(struct tcpm_port *port,
 			}
 			break;
 		case PD_CTRL_VCONN_SWAP:
-			if (port->vdm_sm_running) {
+			if (port->send_discover) {
 				tcpm_queue_message(port, PD_MSG_CTRL_WAIT);
 				break;
 			}
@@ -4470,18 +4472,20 @@ static void run_state_machine(struct tcpm_port *port)
 	/* DR_Swap states */
 	case DR_SWAP_SEND:
 		tcpm_pd_send_control(port, PD_CTRL_DR_SWAP);
+		if (port->data_role == TYPEC_DEVICE || port->negotiated_rev > PD_REV20)
+			port->send_discover = true;
 		tcpm_set_state_cond(port, DR_SWAP_SEND_TIMEOUT,
 				    PD_T_SENDER_RESPONSE);
 		break;
 	case DR_SWAP_ACCEPT:
 		tcpm_pd_send_control(port, PD_CTRL_ACCEPT);
-		/* Set VDM state machine running flag ASAP */
-		if (port->data_role == TYPEC_DEVICE && port->send_discover)
-			port->vdm_sm_running = true;
+		if (port->data_role == TYPEC_DEVICE || port->negotiated_rev > PD_REV20)
+			port->send_discover = true;
 		tcpm_set_state_cond(port, DR_SWAP_CHANGE_DR, 0);
 		break;
 	case DR_SWAP_SEND_TIMEOUT:
 		tcpm_swap_complete(port, -ETIMEDOUT);
+		port->send_discover = false;
 		tcpm_ams_finish(port);
 		tcpm_set_state(port, ready_state(port), 0);
 		break;
@@ -4493,7 +4497,6 @@ static void run_state_machine(struct tcpm_port *port)
 		} else {
 			tcpm_set_roles(port, true, port->pwr_role,
 				       TYPEC_HOST);
-			port->send_discover = true;
 		}
 		tcpm_ams_finish(port);
 		tcpm_set_state(port, ready_state(port), 0);
@@ -4633,8 +4636,6 @@ static void run_state_machine(struct tcpm_port *port)
 		break;
 	case VCONN_SWAP_SEND_TIMEOUT:
 		tcpm_swap_complete(port, -ETIMEDOUT);
-		if (port->data_role == TYPEC_HOST && port->send_discover)
-			port->vdm_sm_running = true;
 		tcpm_set_state(port, ready_state(port), 0);
 		break;
 	case VCONN_SWAP_START:
@@ -4650,14 +4651,10 @@ static void run_state_machine(struct tcpm_port *port)
 	case VCONN_SWAP_TURN_ON_VCONN:
 		tcpm_set_vconn(port, true);
 		tcpm_pd_send_control(port, PD_CTRL_PS_RDY);
-		if (port->data_role == TYPEC_HOST && port->send_discover)
-			port->vdm_sm_running = true;
 		tcpm_set_state(port, ready_state(port), 0);
 		break;
 	case VCONN_SWAP_TURN_OFF_VCONN:
 		tcpm_set_vconn(port, false);
-		if (port->data_role == TYPEC_HOST && port->send_discover)
-			port->vdm_sm_running = true;
 		tcpm_set_state(port, ready_state(port), 0);
 		break;
 
@@ -4665,8 +4662,6 @@ static void run_state_machine(struct tcpm_port *port)
 	case PR_SWAP_CANCEL:
 	case VCONN_SWAP_CANCEL:
 		tcpm_swap_complete(port, port->swap_status);
-		if (port->data_role == TYPEC_HOST && port->send_discover)
-			port->vdm_sm_running = true;
 		if (port->pwr_role == TYPEC_SOURCE)
 			tcpm_set_state(port, SRC_READY, 0);
 		else
@@ -5016,9 +5011,6 @@ static void _tcpm_pd_vbus_on(struct tcpm_port *port)
 	switch (port->state) {
 	case SNK_TRANSITION_SINK_VBUS:
 		port->explicit_contract = true;
-		/* Set the VDM flag ASAP */
-		if (port->data_role == TYPEC_HOST && port->send_discover)
-			port->vdm_sm_running = true;
 		tcpm_set_state(port, SNK_READY, 0);
 		break;
 	case SNK_DISCOVERY:
@@ -5412,15 +5404,18 @@ static void tcpm_send_discover_work(struct kthread_work *work)
 	if (!port->send_discover)
 		goto unlock;
 
+	if (port->data_role == TYPEC_DEVICE && port->negotiated_rev < PD_REV30) {
+		port->send_discover = false;
+		goto unlock;
+	}
+
 	/* Retry if the port is not idle */
 	if ((port->state != SRC_READY && port->state != SNK_READY) || port->vdm_sm_running) {
 		mod_send_discover_delayed_work(port, SEND_DISCOVER_RETRY_MS);
 		goto unlock;
 	}
 
-	/* Only send the Message if the port is host for PD rev2.0 */
-	if (port->data_role == TYPEC_HOST || port->negotiated_rev > PD_REV20)
-		tcpm_send_vdm(port, USB_SID_PD, CMD_DISCOVER_IDENT, NULL, 0);
+	tcpm_send_vdm(port, USB_SID_PD, CMD_DISCOVER_IDENT, NULL, 0);
 
 unlock:
 	mutex_unlock(&port->lock);
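
Reduced to its core, the vdm_sm_running discipline above is: raise the busy flag before dropping the lock to start asynchronous work, and clear it on every terminal path, or later requests will see a stale "busy" forever. A compilable pthreads sketch of that lifecycle, with hypothetical names:

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t demo_lock = PTHREAD_MUTEX_INITIALIZER;
static bool sm_running;

static void run_machine(void (*step)(void))
{
	pthread_mutex_lock(&demo_lock);
	sm_running = true;	/* raised in advance, as tcpm_queue_vdm() now does */
	pthread_mutex_unlock(&demo_lock);

	step();			/* runs unlocked and may send messages */

	pthread_mutex_lock(&demo_lock);
	sm_running = false;	/* must be cleared on every exit path */
	pthread_mutex_unlock(&demo_lock);
}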
drivers/vhost/vringh.c
@@ -359,7 +359,7 @@ __vringh_iov(struct vringh *vrh, u16 i,
 			iov = wiov;
 		else {
 			iov = riov;
-			if (unlikely(wiov && wiov->i)) {
+			if (unlikely(wiov && wiov->used)) {
 				vringh_bad("Readable desc %p after writable",
 					   &descs[i]);
 				err = -EINVAL;
drivers/virtio/virtio_pci_common.c
@@ -576,6 +576,13 @@ static void virtio_pci_remove(struct pci_dev *pci_dev)
 	struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
 	struct device *dev = get_device(&vp_dev->vdev.dev);
 
+	/*
+	 * Device is marked broken on surprise removal so that virtio upper
+	 * layers can abort any ongoing operation.
+	 */
+	if (!pci_device_is_present(pci_dev))
+		virtio_break_device(&vp_dev->vdev);
+
 	pci_disable_sriov(pci_dev);
 
 	unregister_virtio_device(&vp_dev->vdev);
drivers/virtio/virtio_ring.c
@@ -2268,7 +2268,7 @@ bool virtqueue_is_broken(struct virtqueue *_vq)
 {
 	struct vring_virtqueue *vq = to_vvq(_vq);
 
-	return vq->broken;
+	return READ_ONCE(vq->broken);
 }
 EXPORT_SYMBOL_GPL(virtqueue_is_broken);
 
@@ -2283,7 +2283,9 @@ void virtio_break_device(struct virtio_device *dev)
 	spin_lock(&dev->vqs_list_lock);
 	list_for_each_entry(_vq, &dev->vqs, list) {
 		struct vring_virtqueue *vq = to_vvq(_vq);
-		vq->broken = true;
+
+		/* Pairs with READ_ONCE() in virtqueue_is_broken(). */
+		WRITE_ONCE(vq->broken, true);
 	}
 	spin_unlock(&dev->vqs_list_lock);
 }
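
The pairing rule behind this hunk: once a field is written from one context (surprise removal) while read from another (the I/O path) without a shared lock, both sides need the _ONCE accessors so the compiler cannot tear, fuse, or cache the accesses. A minimal kernel-style sketch (demo_vq is illustrative, not the vring types):

#include <linux/compiler.h>
#include <linux/types.h>

struct demo_vq {
	bool broken;
};

static void demo_break(struct demo_vq *vq)
{
	WRITE_ONCE(vq->broken, true);	/* pairs with READ_ONCE() below */
}

static bool demo_is_broken(const struct demo_vq *vq)
{
	return READ_ONCE(vq->broken);
}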
drivers/virtio/virtio_vdpa.c
@@ -149,6 +149,9 @@ virtio_vdpa_setup_vq(struct virtio_device *vdev, unsigned int index,
 	if (!name)
 		return NULL;
 
+	if (index >= vdpa->nvqs)
+		return ERR_PTR(-ENOENT);
+
 	/* Queue shouldn't already be set up. */
 	if (ops->get_vq_ready(vdpa, index))
 		return ERR_PTR(-ENOENT);
fs/btrfs/inode.c
@@ -603,7 +603,7 @@ again:
 	 * inode has not been flagged as nocompress. This flag can
 	 * change at any time if we discover bad compression ratios.
 	 */
-	if (nr_pages > 1 && inode_need_compress(BTRFS_I(inode), start, end)) {
+	if (inode_need_compress(BTRFS_I(inode), start, end)) {
 		WARN_ON(pages);
 		pages = kcalloc(nr_pages, sizeof(struct page *), GFP_NOFS);
 		if (!pages) {
fs/btrfs/volumes.c
@@ -2137,7 +2137,7 @@ int btrfs_rm_device(struct btrfs_fs_info *fs_info, const char *device_path,
 
 	if (IS_ERR(device)) {
 		if (PTR_ERR(device) == -ENOENT &&
-		    strcmp(device_path, "missing") == 0)
+		    device_path && strcmp(device_path, "missing") == 0)
 			ret = BTRFS_ERROR_DEV_MISSING_NOT_FOUND;
 		else
 			ret = PTR_ERR(device);
|
|||
|
||||
struct ceph_cap_flush *ceph_alloc_cap_flush(void)
|
||||
{
|
||||
return kmem_cache_alloc(ceph_cap_flush_cachep, GFP_KERNEL);
|
||||
struct ceph_cap_flush *cf;
|
||||
|
||||
cf = kmem_cache_alloc(ceph_cap_flush_cachep, GFP_KERNEL);
|
||||
cf->is_capsnap = false;
|
||||
return cf;
|
||||
}
|
||||
|
||||
void ceph_free_cap_flush(struct ceph_cap_flush *cf)
|
||||
|
@ -1788,7 +1792,7 @@ static bool __detach_cap_flush_from_mdsc(struct ceph_mds_client *mdsc,
|
|||
prev->wake = true;
|
||||
wake = false;
|
||||
}
|
||||
list_del(&cf->g_list);
|
||||
list_del_init(&cf->g_list);
|
||||
return wake;
|
||||
}
|
||||
|
||||
|
@ -1803,7 +1807,7 @@ static bool __detach_cap_flush_from_ci(struct ceph_inode_info *ci,
|
|||
prev->wake = true;
|
||||
wake = false;
|
||||
}
|
||||
list_del(&cf->i_list);
|
||||
list_del_init(&cf->i_list);
|
||||
return wake;
|
||||
}
|
||||
|
||||
|
@ -2423,7 +2427,7 @@ static void __kick_flushing_caps(struct ceph_mds_client *mdsc,
|
|||
ci->i_ceph_flags &= ~CEPH_I_KICK_FLUSH;
|
||||
|
||||
list_for_each_entry_reverse(cf, &ci->i_cap_flush_list, i_list) {
|
||||
if (!cf->caps) {
|
||||
if (cf->is_capsnap) {
|
||||
last_snap_flush = cf->tid;
|
||||
break;
|
||||
}
|
||||
|
@ -2442,7 +2446,7 @@ static void __kick_flushing_caps(struct ceph_mds_client *mdsc,
|
|||
|
||||
first_tid = cf->tid + 1;
|
||||
|
||||
if (cf->caps) {
|
||||
if (!cf->is_capsnap) {
|
||||
struct cap_msg_args arg;
|
||||
|
||||
dout("kick_flushing_caps %p cap %p tid %llu %s\n",
|
||||
|
@ -3589,7 +3593,7 @@ static void handle_cap_flush_ack(struct inode *inode, u64 flush_tid,
|
|||
cleaned = cf->caps;
|
||||
|
||||
/* Is this a capsnap? */
|
||||
if (cf->caps == 0)
|
||||
if (cf->is_capsnap)
|
||||
continue;
|
||||
|
||||
if (cf->tid <= flush_tid) {
|
||||
|
@ -3662,8 +3666,9 @@ out:
|
|||
while (!list_empty(&to_remove)) {
|
||||
cf = list_first_entry(&to_remove,
|
||||
struct ceph_cap_flush, i_list);
|
||||
list_del(&cf->i_list);
|
||||
ceph_free_cap_flush(cf);
|
||||
list_del_init(&cf->i_list);
|
||||
if (!cf->is_capsnap)
|
||||
ceph_free_cap_flush(cf);
|
||||
}
|
||||
|
||||
if (wake_ci)
|
||||
|
|
|
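
The list_del() to list_del_init() switch is what lets an embedded cap_flush survive being detached more than once: list_del() poisons the entry, so a second detach or a list_empty() check on it is undefined, while list_del_init() re-points the entry at itself and keeps both safe. In isolation, using plain <linux/list.h> semantics and a hypothetical struct:

#include <linux/list.h>

struct demo_flush {
	struct list_head g_list;	/* global list */
	struct list_head i_list;	/* per-inode list */
};

static void demo_detach(struct demo_flush *cf)
{
	list_del_init(&cf->g_list);	/* safe even if already detached */
	list_del_init(&cf->i_list);
	/* list_empty(&cf->g_list) is now true and well-defined */
}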
fs/ceph/mds_client.c
@@ -1621,7 +1621,7 @@ static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap,
 	spin_lock(&mdsc->cap_dirty_lock);
 
 	list_for_each_entry(cf, &to_remove, i_list)
-		list_del(&cf->g_list);
+		list_del_init(&cf->g_list);
 
 	if (!list_empty(&ci->i_dirty_item)) {
 		pr_warn_ratelimited(
@@ -1673,8 +1673,9 @@ static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap,
 		struct ceph_cap_flush *cf;
 		cf = list_first_entry(&to_remove,
 				      struct ceph_cap_flush, i_list);
-		list_del(&cf->i_list);
-		ceph_free_cap_flush(cf);
+		list_del_init(&cf->i_list);
+		if (!cf->is_capsnap)
+			ceph_free_cap_flush(cf);
 	}
 
 	wake_up_all(&ci->i_cap_wq);
fs/ceph/snap.c
@@ -487,6 +487,9 @@ void ceph_queue_cap_snap(struct ceph_inode_info *ci)
 		pr_err("ENOMEM allocating ceph_cap_snap on %p\n", inode);
 		return;
 	}
+	capsnap->cap_flush.is_capsnap = true;
+	INIT_LIST_HEAD(&capsnap->cap_flush.i_list);
+	INIT_LIST_HEAD(&capsnap->cap_flush.g_list);
 
 	spin_lock(&ci->i_ceph_lock);
 	used = __ceph_caps_used(ci);
fs/ceph/super.h
@@ -182,8 +182,9 @@ struct ceph_cap {
 
 struct ceph_cap_flush {
 	u64 tid;
-	int caps; /* 0 means capsnap */
+	int caps;
 	bool wake; /* wake up flush waiters when finish ? */
+	bool is_capsnap; /* true means capsnap */
 	struct list_head g_list; // global
 	struct list_head i_list; // per inode
 };
fs/crypto/hooks.c
@@ -384,3 +384,47 @@ err_kfree:
 	return ERR_PTR(err);
 }
 EXPORT_SYMBOL_GPL(fscrypt_get_symlink);
+
+/**
+ * fscrypt_symlink_getattr() - set the correct st_size for encrypted symlinks
+ * @path: the path for the encrypted symlink being queried
+ * @stat: the struct being filled with the symlink's attributes
+ *
+ * Override st_size of encrypted symlinks to be the length of the decrypted
+ * symlink target (or the no-key encoded symlink target, if the key is
+ * unavailable) rather than the length of the encrypted symlink target. This is
+ * necessary for st_size to match the symlink target that userspace actually
+ * sees. POSIX requires this, and some userspace programs depend on it.
+ *
+ * This requires reading the symlink target from disk if needed, setting up the
+ * inode's encryption key if possible, and then decrypting or encoding the
+ * symlink target. This makes lstat() more heavyweight than is normally the
+ * case. However, decrypted symlink targets will be cached in ->i_link, so
+ * usually the symlink won't have to be read and decrypted again later if/when
+ * it is actually followed, readlink() is called, or lstat() is called again.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+int fscrypt_symlink_getattr(const struct path *path, struct kstat *stat)
+{
+	struct dentry *dentry = path->dentry;
+	struct inode *inode = d_inode(dentry);
+	const char *link;
+	DEFINE_DELAYED_CALL(done);
+
+	/*
+	 * To get the symlink target that userspace will see (whether it's the
+	 * decrypted target or the no-key encoded target), we can just get it in
+	 * the same way the VFS does during path resolution and readlink().
+	 */
+	link = READ_ONCE(inode->i_link);
+	if (!link) {
+		link = inode->i_op->get_link(dentry, inode, &done);
+		if (IS_ERR(link))
+			return PTR_ERR(link);
+	}
+	stat->size = strlen(link);
+	do_delayed_call(&done);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(fscrypt_symlink_getattr);
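
The property the new helper enforces can be checked from userspace with nothing but POSIX calls: for any symlink, st_size from lstat() should equal the length of the target returned by readlink(). A small standalone checker:

#include <limits.h>
#include <stdio.h>
#include <sys/stat.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	char buf[PATH_MAX];
	struct stat st;
	ssize_t n;

	if (argc != 2 || lstat(argv[1], &st) != 0)
		return 1;

	n = readlink(argv[1], buf, sizeof(buf) - 1);
	if (n < 0)
		return 1;

	printf("st_size=%lld readlink=%zd %s\n", (long long)st.st_size, n,
	       (long long)n == (long long)st.st_size ? "OK" : "MISMATCH");
	return 0;
}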
fs/ext4/symlink.c
@@ -52,10 +52,20 @@ static const char *ext4_encrypted_get_link(struct dentry *dentry,
 	return paddr;
 }
 
+static int ext4_encrypted_symlink_getattr(struct user_namespace *mnt_userns,
+					  const struct path *path,
+					  struct kstat *stat, u32 request_mask,
+					  unsigned int query_flags)
+{
+	ext4_getattr(mnt_userns, path, stat, request_mask, query_flags);
+
+	return fscrypt_symlink_getattr(path, stat);
+}
+
 const struct inode_operations ext4_encrypted_symlink_inode_operations = {
 	.get_link	= ext4_encrypted_get_link,
 	.setattr	= ext4_setattr,
-	.getattr	= ext4_getattr,
+	.getattr	= ext4_encrypted_symlink_getattr,
 	.listxattr	= ext4_listxattr,
 };
fs/f2fs/namei.c
@@ -1313,9 +1313,19 @@ static const char *f2fs_encrypted_get_link(struct dentry *dentry,
 	return target;
 }
 
+static int f2fs_encrypted_symlink_getattr(struct user_namespace *mnt_userns,
+					  const struct path *path,
+					  struct kstat *stat, u32 request_mask,
+					  unsigned int query_flags)
+{
+	f2fs_getattr(mnt_userns, path, stat, request_mask, query_flags);
+
+	return fscrypt_symlink_getattr(path, stat);
+}
+
 const struct inode_operations f2fs_encrypted_symlink_inode_operations = {
 	.get_link	= f2fs_encrypted_get_link,
-	.getattr	= f2fs_getattr,
+	.getattr	= f2fs_encrypted_symlink_getattr,
 	.setattr	= f2fs_setattr,
 	.listxattr	= f2fs_listxattr,
 };
Some files were not shown because too many files have changed in this diff.