Mirror of git://git.yoctoproject.org/linux-yocto.git (synced 2025-10-23 07:23:12 +02:00)
This is the 5.13.6 stable release
-----BEGIN PGP SIGNATURE-----

iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAmEBUFMACgkQONu9yGCS
aT6UVw//bTkipihT/4NEX1oVNwUKnjRO6mJ1FUcWJzfiSZlDVLP8gi5FNWQ1Y0XL
TEuZmIzxq2nZMwMC+VldE1fBDJ/hT8NOK4FUWz9e64qdrodttubEFtsFRbLdou7B
nynYFxbPlcEHLj3MQ7SLir93H4B2D1RpSrzXEISrqQvyWE4gtObo2VKesvfioIKP
RPNwntDjynJt7jtFFTNqlWpBb26L0RG3rpQlxZmB53g4u6LKg9MT6Kcwndx9G903
uIEn8TXJshDJZP8ZQ9V/vi6DvFprsChC+6m90x0NmwqO35nQ6WewmOM4n3rXkPX5
NrKf4mS1OK3DEkUUuEXwj/15ll5hIh1lyRTApeoY9H6sLTFo6/8EIEh4hpRed9Aa
GlYblzeXuEJ7nwtMTFZWqqJLLjltd981rPxw1v6tyRs0GBgrnt7ggJdCs7azRRQQ
ueM/bIqPCVisvno6FyWEIM/TrngYnWGv10y2mQN6f0xDVpyk97A9sejXB0Vd124K
+eVStH74ixPdK+ChITelAQoVIKgcIw4x3U3TvQrT0deyx3kx+R6rbwdfV41zdq8P
vlBpWQ8xnp3UQEEsvNnGH7sFXvJvIJUUNBPy/aRRyNXfqrbSaE+i9DzFZkyNhNoP
CYskgxieIZvz+ahD7oDH6ul9AapjiaJhB8qDNPXcdU09tFoV0q0=
=LzFp
-----END PGP SIGNATURE-----

Merge tag 'v5.13.6' into v5.13/standard/base

This is the 5.13.6 stable release

# gpg: Signature made Wed 28 Jul 2021 08:40:51 AM EDT
# gpg:                using RSA key 647F28654894E3BD457199BE38DBBDC86092693E
# gpg: Can't check signature: No public key
commit 01c0ac5183
@@ -45,14 +45,24 @@ how the user addresses are used by the kernel:

1. User addresses not accessed by the kernel but used for address space
management (e.g. ``mprotect()``, ``madvise()``). The use of valid
tagged pointers in this context is allowed with the exception of
``brk()``, ``mmap()`` and the ``new_address`` argument to
``mremap()`` as these have the potential to alias with existing
user addresses.
tagged pointers in this context is allowed with these exceptions:

NOTE: This behaviour changed in v5.6 and so some earlier kernels may
incorrectly accept valid tagged pointers for the ``brk()``,
``mmap()`` and ``mremap()`` system calls.
- ``brk()``, ``mmap()`` and the ``new_address`` argument to
``mremap()`` as these have the potential to alias with existing
user addresses.

NOTE: This behaviour changed in v5.6 and so some earlier kernels may
incorrectly accept valid tagged pointers for the ``brk()``,
``mmap()`` and ``mremap()`` system calls.

- The ``range.start``, ``start`` and ``dst`` arguments to the
``UFFDIO_*`` ``ioctl()``s used on a file descriptor obtained from
``userfaultfd()``, as fault addresses subsequently obtained by reading
the file descriptor will be untagged, which may otherwise confuse
tag-unaware programs.

NOTE: This behaviour changed in v5.14 and so some earlier kernels may
incorrectly accept valid tagged pointers for this system call.

2. User addresses accessed by the kernel (e.g. ``write()``). This ABI
relaxation is disabled by default and the application thread needs to
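As a hedged userspace illustration of the two cases above (arm64-only behaviour; constants come from <linux/prctl.h>, the tag value is arbitrary and error handling is elided):

#include <string.h>
#include <unistd.h>
#include <sys/mman.h>
#include <sys/prctl.h>
#include <linux/prctl.h>

int main(void)
{
	/* opt in to the tagged-address ABI (the case 2 relaxation) */
	if (prctl(PR_SET_TAGGED_ADDR_CTRL, PR_TAGGED_ADDR_ENABLE, 0, 0, 0))
		return 1;

	char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	char *tagged = (char *)((unsigned long)p | (0x2aUL << 56));

	strcpy(tagged, "ok");	/* kernel-accessed address: tag ignored */
	write(1, tagged, 2);	/* likewise fine via write() */
	/* but passing 'tagged' to mmap(..., MAP_FIXED, ...) falls under the
	 * exceptions listed above and would be rejected */
	return 0;
}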
@@ -69,17 +69,17 @@ early userspace image can be built by an unprivileged user.

As a technical note, when directories and files are specified, the
entire CONFIG_INITRAMFS_SOURCE is passed to
usr/gen_initramfs_list.sh. This means that CONFIG_INITRAMFS_SOURCE
usr/gen_initramfs.sh. This means that CONFIG_INITRAMFS_SOURCE
can really be interpreted as any legal argument to
gen_initramfs_list.sh. If a directory is specified as an argument then
gen_initramfs.sh. If a directory is specified as an argument then
the contents are scanned, uid/gid translation is performed, and
usr/gen_init_cpio file directives are output. If a directory is
specified as an argument to usr/gen_initramfs_list.sh then the
specified as an argument to usr/gen_initramfs.sh then the
contents of the file are simply copied to the output. All of the output
directives from directory scanning and file contents copying are
processed by usr/gen_init_cpio.

See also 'usr/gen_initramfs_list.sh -h'.
See also 'usr/gen_initramfs.sh -h'.

Where's this all leading?
=========================
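A short sketch of the renamed flow (the directory layout and the use of -o are assumptions based on the help text referenced above):

mkdir -p myroot/bin myroot/dev myroot/proc myroot/sys
usr/gen_initramfs.sh -o initramfs.cpio myroot
# or let kbuild drive the same path via CONFIG_INITRAMFS_SOURCE="myroot"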
@@ -170,7 +170,7 @@ Documentation/driver-api/early-userspace/early_userspace_support.rst for more de
The kernel does not depend on external cpio tools. If you specify a
directory instead of a configuration file, the kernel's build infrastructure
creates a configuration file from that directory (usr/Makefile calls
usr/gen_initramfs_list.sh), and proceeds to package up that directory
usr/gen_initramfs.sh), and proceeds to package up that directory
using the config file (by feeding it to usr/gen_init_cpio, which is created
from usr/gen_init_cpio.c). The kernel's build-time cpio creation code is
entirely self-contained, and the kernel's boot-time extractor is also
@@ -772,7 +772,7 @@ tcp_fastopen_blackhole_timeout_sec - INTEGER
initial value when the blackhole issue goes away.
0 to disable the blackhole detection.

By default, it is set to 1hr.
By default, it is set to 0 (feature is disabled).

tcp_fastopen_key - list of comma separated 32-digit hexadecimal INTEGERs
The list consists of a primary key and an optional backup key. The
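In practice the change means TFO blackhole detection now ships disabled; an administrator who relied on the old behaviour can restore the previous one-hour window with the sysctl documented above:

sysctl -w net.ipv4.tcp_fastopen_blackhole_timeout_sec=3600   # old default
sysctl -w net.ipv4.tcp_fastopen_blackhole_timeout_sec=0      # new default, detection off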
@@ -191,7 +191,7 @@ Documentation written by Tom Zanussi
with the event, in nanoseconds. May be
modified by .usecs to have timestamps
interpreted as microseconds.
cpu                    int  the cpu on which the event occurred.
common_cpu             int  the cpu on which the event occurred.
====================== ==== =======================================

Extended error information
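A hedged usage sketch of the renamed field (the tracefs mount point and event choice are assumptions):

echo 'hist:keys=common_cpu' > /sys/kernel/tracing/events/sched/sched_switch/trigger
cat /sys/kernel/tracing/events/sched/sched_switch/hist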
Makefile

@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 5
PATCHLEVEL = 13
SUBLEVEL = 5
SUBLEVEL = 6
EXTRAVERSION =
NAME = Opossums on Parade
@@ -4,6 +4,7 @@
#include "aspeed-g5.dtsi"
#include <dt-bindings/gpio/aspeed-gpio.h>
#include <dt-bindings/i2c/i2c.h>
#include <dt-bindings/interrupt-controller/irq.h>

/{
model = "ASRock E3C246D4I BMC";

@@ -73,7 +74,8 @@

&vuart {
status = "okay";
aspeed,sirq-active-high;
aspeed,lpc-io-reg = <0x2f8>;
aspeed,lpc-interrupts = <3 IRQ_TYPE_LEVEL_HIGH>;
};

&mac0 {
@@ -821,7 +821,7 @@ CONFIG_USB_ISP1760=y
CONFIG_USB_HSIC_USB3503=y
CONFIG_AB8500_USB=y
CONFIG_KEYSTONE_USB_PHY=m
CONFIG_NOP_USB_XCEIV=m
CONFIG_NOP_USB_XCEIV=y
CONFIG_AM335X_PHY_USB=m
CONFIG_TWL6030_USB=m
CONFIG_USB_GPIO_VBUS=y
@@ -17,7 +17,7 @@ CFLAGS_syscall.o += -fno-stack-protector
# It's not safe to invoke KCOV when portions of the kernel environment aren't
# available or are out-of-sync with HW state. Since `noinstr` doesn't always
# inhibit KCOV instrumentation, disable it for the entire compilation unit.
KCOV_INSTRUMENT_entry.o := n
KCOV_INSTRUMENT_entry-common.o := n

# Object file lists.
obj-y := debug-monitors.o entry.o irq.o fpsimd.o \
@@ -185,18 +185,6 @@ void mte_check_tfsr_el1(void)
}
#endif

static void update_gcr_el1_excl(u64 excl)
{

/*
* Note that the mask controlled by the user via prctl() is an
* include while GCR_EL1 accepts an exclude mask.
* No need for ISB since this only affects EL0 currently, implicit
* with ERET.
*/
sysreg_clear_set_s(SYS_GCR_EL1, SYS_GCR_EL1_EXCL_MASK, excl);
}

static void set_gcr_el1_excl(u64 excl)
{
current->thread.gcr_user_excl = excl;

@@ -257,7 +245,8 @@ void mte_suspend_exit(void)
if (!system_supports_mte())
return;

update_gcr_el1_excl(gcr_kernel_excl);
sysreg_clear_set_s(SYS_GCR_EL1, SYS_GCR_EL1_EXCL_MASK, gcr_kernel_excl);
isb();
}

long set_mte_ctrl(struct task_struct *task, unsigned long arg)
@@ -59,7 +59,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,

vma = find_vma(mm, addr);
if (TASK_SIZE - len >= addr &&
(!vma || addr + len <= vma->vm_start))
(!vma || addr + len <= vm_start_gap(vma)))
return addr;
}
@@ -2445,8 +2445,10 @@ static int kvmppc_core_vcpu_create_hv(struct kvm_vcpu *vcpu)
HFSCR_DSCR | HFSCR_VECVSX | HFSCR_FP | HFSCR_PREFIX;
if (cpu_has_feature(CPU_FTR_HVMODE)) {
vcpu->arch.hfscr &= mfspr(SPRN_HFSCR);
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
if (cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST))
vcpu->arch.hfscr |= HFSCR_TM;
#endif
}
if (cpu_has_feature(CPU_FTR_TM_COMP))
vcpu->arch.hfscr |= HFSCR_TM;
@@ -301,6 +301,9 @@ long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu)
if (vcpu->kvm->arch.l1_ptcr == 0)
return H_NOT_AVAILABLE;

if (MSR_TM_TRANSACTIONAL(vcpu->arch.shregs.msr))
return H_BAD_MODE;

/* copy parameters in */
hv_ptr = kvmppc_get_gpr(vcpu, 4);
regs_ptr = kvmppc_get_gpr(vcpu, 5);

@@ -321,6 +324,23 @@ long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu)
if (l2_hv.vcpu_token >= NR_CPUS)
return H_PARAMETER;

/*
* L1 must have set up a suspended state to enter the L2 in a
* transactional state, and only in that case. These have to be
* filtered out here to prevent causing a TM Bad Thing in the
* host HRFID. We could synthesize a TM Bad Thing back to the L1
* here but there doesn't seem like much point.
*/
if (MSR_TM_SUSPENDED(vcpu->arch.shregs.msr)) {
if (!MSR_TM_ACTIVE(l2_regs.msr))
return H_BAD_MODE;
} else {
if (l2_regs.msr & MSR_TS_MASK)
return H_BAD_MODE;
if (WARN_ON_ONCE(vcpu->arch.shregs.msr & MSR_TS_MASK))
return H_BAD_MODE;
}

/* translate lpid */
l2 = kvmhv_get_nested(vcpu->kvm, l2_hv.lpid, true);
if (!l2)
@@ -242,6 +242,17 @@ int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu)
* value so we can restore it on the way out.
*/
orig_rets = args.rets;
if (be32_to_cpu(args.nargs) >= ARRAY_SIZE(args.args)) {
/*
* Don't overflow our args array: ensure there is room for
* at least rets[0] (even if the call specifies 0 nret).
*
* Each handler must then check for the correct nargs and nret
* values, but they may always return failure in rets[0].
*/
rc = -EINVAL;
goto fail;
}
args.rets = &args.args[be32_to_cpu(args.nargs)];

mutex_lock(&vcpu->kvm->arch.rtas_token_lock);

@@ -269,9 +280,17 @@ int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu)
fail:
/*
* We only get here if the guest has called RTAS with a bogus
* args pointer. That means we can't get to the args, and so we
* can't fail the RTAS call. So fail right out to userspace,
* which should kill the guest.
* args pointer or nargs/nret values that would overflow the
* array. That means we can't get to the args, and so we can't
* fail the RTAS call. So fail right out to userspace, which
* should kill the guest.
*
* SLOF should actually pass the hcall return value from the
* rtas handler call in r3, so enter_rtas could be modified to
* return a failure indication in r3 and we could return such
* errors to the guest rather than failing to host userspace.
* However old guests that don't test for failure could then
* continue silently after errors, so for now we won't do this.
*/
return rc;
}
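For context, a self-contained sketch of the hardening pattern this hunk applies (names here are invented for illustration, not the kernel's): a guest-controlled count has to be validated against the backing array before it positions the rets pointer.

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <asm/byteorder.h>

struct demo_rtas_args {
	__be32 nargs;
	__be32 nret;
	__be32 args[16];
	__be32 *rets;
};

/* Returns -EINVAL instead of letting rets point past args[] */
static int demo_place_rets(struct demo_rtas_args *a)
{
	u32 nargs = be32_to_cpu(a->nargs);

	if (nargs >= ARRAY_SIZE(a->args))
		return -EINVAL;	/* keep room for at least rets[0] */

	a->rets = &a->args[nargs];
	return 0;
}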
@@ -2045,9 +2045,9 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
{
struct kvm_enable_cap cap;
r = -EFAULT;
vcpu_load(vcpu);
if (copy_from_user(&cap, argp, sizeof(cap)))
goto out;
vcpu_load(vcpu);
r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
vcpu_put(vcpu);
break;

@@ -2071,9 +2071,9 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
case KVM_DIRTY_TLB: {
struct kvm_dirty_tlb dirty;
r = -EFAULT;
vcpu_load(vcpu);
if (copy_from_user(&dirty, argp, sizeof(dirty)))
goto out;
vcpu_load(vcpu);
r = kvm_vcpu_ioctl_dirty_tlb(vcpu, &dirty);
vcpu_put(vcpu);
break;
@@ -27,10 +27,10 @@ int efi_set_mapping_permissions(struct mm_struct *mm, efi_memory_desc_t *md);

#define ARCH_EFI_IRQ_FLAGS_MASK (SR_IE | SR_SPIE)

/* Load initrd at enough distance from DRAM start */
/* Load initrd anywhere in system RAM */
static inline unsigned long efi_get_max_initrd_addr(unsigned long image_addr)
{
return image_addr + SZ_256M;
return ULONG_MAX;
}

#define alloc_screen_info(x...) (&screen_info)
@@ -123,7 +123,7 @@ void __init setup_bootmem(void)
{
phys_addr_t vmlinux_end = __pa_symbol(&_end);
phys_addr_t vmlinux_start = __pa_symbol(&_start);
phys_addr_t dram_end = memblock_end_of_DRAM();
phys_addr_t dram_end;
phys_addr_t max_mapped_addr = __pa(~(ulong)0);

#ifdef CONFIG_XIP_KERNEL

@@ -146,6 +146,8 @@ void __init setup_bootmem(void)
#endif
memblock_reserve(vmlinux_start, vmlinux_end - vmlinux_start);

dram_end = memblock_end_of_DRAM();

/*
* memblock allocator is not aware of the fact that last 4K bytes of
* the addressable memory can not be mapped because of IS_ERR_VALUE
@@ -9,16 +9,6 @@
#include <asm/errno.h>
#include <asm/sigp.h>

#ifdef CC_USING_EXPOLINE
.pushsection .dma.text.__s390_indirect_jump_r14,"axG"
__dma__s390_indirect_jump_r14:
larl %r1,0f
ex 0,0(%r1)
j .
0: br %r14
.popsection
#endif

.section .dma.text,"ax"
/*
* Simplified version of expoline thunk. The normal thunks can not be used here,

@@ -27,11 +17,10 @@ __dma__s390_indirect_jump_r14:
* affects a few functions that are not performance-relevant.
*/
.macro BR_EX_DMA_r14
#ifdef CC_USING_EXPOLINE
jg __dma__s390_indirect_jump_r14
#else
br %r14
#endif
larl %r1,0f
ex 0,0(%r1)
j .
0: br %r14
.endm

/*
@@ -19,6 +19,7 @@ void ftrace_caller(void);

extern char ftrace_graph_caller_end;
extern unsigned long ftrace_plt;
extern void *ftrace_func;

struct dyn_arch_ftrace { };
@@ -40,6 +40,7 @@
* trampoline (ftrace_plt), which clobbers also r1.
*/

void *ftrace_func __read_mostly = ftrace_stub;
unsigned long ftrace_plt;

int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,

@@ -85,6 +86,7 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)

int ftrace_update_ftrace_func(ftrace_func_t func)
{
ftrace_func = func;
return 0;
}
@@ -59,13 +59,13 @@ ENTRY(ftrace_caller)
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
aghik %r2,%r0,-MCOUNT_INSN_SIZE
lgrl %r4,function_trace_op
lgrl %r1,ftrace_trace_function
lgrl %r1,ftrace_func
#else
lgr %r2,%r0
aghi %r2,-MCOUNT_INSN_SIZE
larl %r4,function_trace_op
lg %r4,0(%r4)
larl %r1,ftrace_trace_function
larl %r1,ftrace_func
lg %r1,0(%r1)
#endif
lgr %r3,%r14
@@ -112,7 +112,7 @@ static inline void reg_set_seen(struct bpf_jit *jit, u32 b1)
{
u32 r1 = reg2hex[b1];

if (!jit->seen_reg[r1] && r1 >= 6 && r1 <= 15)
if (r1 >= 6 && r1 <= 15 && !jit->seen_reg[r1])
jit->seen_reg[r1] = 1;
}
@@ -765,7 +765,8 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)

edx.split.num_counters_fixed = min(cap.num_counters_fixed, MAX_FIXED_COUNTERS);
edx.split.bit_width_fixed = cap.bit_width_fixed;
edx.split.anythread_deprecated = 1;
if (cap.version)
edx.split.anythread_deprecated = 1;
edx.split.reserved1 = 0;
edx.split.reserved2 = 0;
@@ -1271,8 +1271,8 @@ static int sev_send_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
/* Pin guest memory */
guest_page = sev_pin_memory(kvm, params.guest_uaddr & PAGE_MASK,
PAGE_SIZE, &n, 0);
if (!guest_page)
return -EFAULT;
if (IS_ERR(guest_page))
return PTR_ERR(guest_page);

/* allocate memory for header and transport buffer */
ret = -ENOMEM;

@@ -1309,8 +1309,9 @@ static int sev_send_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
}

/* Copy packet header to userspace. */
ret = copy_to_user((void __user *)(uintptr_t)params.hdr_uaddr, hdr,
params.hdr_len);
if (copy_to_user((void __user *)(uintptr_t)params.hdr_uaddr, hdr,
params.hdr_len))
ret = -EFAULT;

e_free_trans_data:
kfree(trans_data);

@@ -1462,11 +1463,12 @@ static int sev_receive_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
data.trans_len = params.trans_len;

/* Pin guest memory */
ret = -EFAULT;
guest_page = sev_pin_memory(kvm, params.guest_uaddr & PAGE_MASK,
PAGE_SIZE, &n, 0);
if (!guest_page)
if (IS_ERR(guest_page)) {
ret = PTR_ERR(guest_page);
goto e_free_trans;
}

/* The RECEIVE_UPDATE_DATA command requires C-bit to be always set. */
data.guest_address = (page_to_pfn(guest_page[0]) << PAGE_SHIFT) + offset;
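The root cause here is the ERR_PTR convention: sev_pin_memory() reports failure by encoding an errno in the returned pointer, never by returning NULL, so the old NULL checks could not fire. A minimal generic sketch of the convention (helper names invented):

#include <linux/err.h>
#include <linux/slab.h>

static void *demo_alloc_thing(size_t len)
{
	void *p = kmalloc(len, GFP_KERNEL);

	/* failure is encoded in the pointer, not signalled via NULL */
	return p ? p : ERR_PTR(-ENOMEM);
}

static int demo_caller(void)
{
	void *thing = demo_alloc_thing(64);

	if (IS_ERR(thing))
		return PTR_ERR(thing);	/* the real -ENOMEM, not a bogus -EFAULT */

	kfree(thing);
	return 0;
}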
@@ -370,7 +370,7 @@ config ACPI_TABLE_UPGRADE
config ACPI_TABLE_OVERRIDE_VIA_BUILTIN_INITRD
bool "Override ACPI tables from built-in initrd"
depends on ACPI_TABLE_UPGRADE
depends on INITRAMFS_SOURCE!="" && INITRAMFS_COMPRESSION=""
depends on INITRAMFS_SOURCE!="" && INITRAMFS_COMPRESSION_NONE
help
This option provides functionality to override arbitrary ACPI tables
from built-in uncompressed initrd.
@@ -846,11 +846,9 @@ EXPORT_SYMBOL(acpi_dev_present);
* Return the next match of ACPI device if another matching device was present
* at the moment of invocation, or NULL otherwise.
*
* FIXME: The function does not tolerate the sudden disappearance of @adev, e.g.
* in the case of a hotplug event. That said, the caller should ensure that
* this will never happen.
*
* The caller is responsible for invoking acpi_dev_put() on the returned device.
* On the other hand the function invokes acpi_dev_put() on the given @adev
* assuming that its reference counter had been increased beforehand.
*
* See additional information in acpi_dev_present() as well.
*/

@@ -866,6 +864,7 @@ acpi_dev_get_next_match_dev(struct acpi_device *adev, const char *hid, const cha
match.hrv = hrv;

dev = bus_find_device(&acpi_bus_type, start, &match, acpi_dev_match_cb);
acpi_dev_put(adev);
return dev ? to_acpi_device(dev) : NULL;
}
EXPORT_SYMBOL(acpi_dev_get_next_match_dev);
@@ -231,6 +231,8 @@ EXPORT_SYMBOL_GPL(auxiliary_find_device);
int __auxiliary_driver_register(struct auxiliary_driver *auxdrv,
struct module *owner, const char *modname)
{
int ret;

if (WARN_ON(!auxdrv->probe) || WARN_ON(!auxdrv->id_table))
return -EINVAL;

@@ -246,7 +248,11 @@ int __auxiliary_driver_register(struct auxiliary_driver *auxdrv,
auxdrv->driver.bus = &auxiliary_bus_type;
auxdrv->driver.mod_name = modname;

return driver_register(&auxdrv->driver);
ret = driver_register(&auxdrv->driver);
if (ret)
kfree(auxdrv->driver.name);

return ret;
}
EXPORT_SYMBOL_GPL(__auxiliary_driver_register);
@@ -574,8 +574,10 @@ static void devlink_remove_symlinks(struct device *dev,
return;
}

snprintf(buf, len, "supplier:%s:%s", dev_bus_name(sup), dev_name(sup));
sysfs_remove_link(&con->kobj, buf);
if (device_is_registered(con)) {
snprintf(buf, len, "supplier:%s:%s", dev_bus_name(sup), dev_name(sup));
sysfs_remove_link(&con->kobj, buf);
}
snprintf(buf, len, "consumer:%s:%s", dev_bus_name(con), dev_name(con));
sysfs_remove_link(&sup->kobj, buf);
kfree(buf);
@@ -4100,8 +4100,6 @@ again:

static bool rbd_quiesce_lock(struct rbd_device *rbd_dev)
{
bool need_wait;

dout("%s rbd_dev %p\n", __func__, rbd_dev);
lockdep_assert_held_write(&rbd_dev->lock_rwsem);

@@ -4113,11 +4111,11 @@ static bool rbd_quiesce_lock(struct rbd_device *rbd_dev)
*/
rbd_dev->lock_state = RBD_LOCK_STATE_RELEASING;
rbd_assert(!completion_done(&rbd_dev->releasing_wait));
need_wait = !list_empty(&rbd_dev->running_list);
downgrade_write(&rbd_dev->lock_rwsem);
if (need_wait)
wait_for_completion(&rbd_dev->releasing_wait);
up_read(&rbd_dev->lock_rwsem);
if (list_empty(&rbd_dev->running_list))
return true;

up_write(&rbd_dev->lock_rwsem);
wait_for_completion(&rbd_dev->releasing_wait);

down_write(&rbd_dev->lock_rwsem);
if (rbd_dev->lock_state != RBD_LOCK_STATE_RELEASING)

@@ -4203,15 +4201,11 @@ static void rbd_handle_acquired_lock(struct rbd_device *rbd_dev, u8 struct_v,
if (!rbd_cid_equal(&cid, &rbd_empty_cid)) {
down_write(&rbd_dev->lock_rwsem);
if (rbd_cid_equal(&cid, &rbd_dev->owner_cid)) {
/*
* we already know that the remote client is
* the owner
*/
up_write(&rbd_dev->lock_rwsem);
return;
dout("%s rbd_dev %p cid %llu-%llu == owner_cid\n",
__func__, rbd_dev, cid.gid, cid.handle);
} else {
rbd_set_owner_cid(rbd_dev, &cid);
}

rbd_set_owner_cid(rbd_dev, &cid);
downgrade_write(&rbd_dev->lock_rwsem);
} else {
down_read(&rbd_dev->lock_rwsem);

@@ -4236,14 +4230,12 @@ static void rbd_handle_released_lock(struct rbd_device *rbd_dev, u8 struct_v,
if (!rbd_cid_equal(&cid, &rbd_empty_cid)) {
down_write(&rbd_dev->lock_rwsem);
if (!rbd_cid_equal(&cid, &rbd_dev->owner_cid)) {
dout("%s rbd_dev %p unexpected owner, cid %llu-%llu != owner_cid %llu-%llu\n",
dout("%s rbd_dev %p cid %llu-%llu != owner_cid %llu-%llu\n",
__func__, rbd_dev, cid.gid, cid.handle,
rbd_dev->owner_cid.gid, rbd_dev->owner_cid.handle);
up_write(&rbd_dev->lock_rwsem);
return;
} else {
rbd_set_owner_cid(rbd_dev, &rbd_empty_cid);
}

rbd_set_owner_cid(rbd_dev, &rbd_empty_cid);
downgrade_write(&rbd_dev->lock_rwsem);
} else {
down_read(&rbd_dev->lock_rwsem);
@@ -773,11 +773,18 @@ static void mhi_process_cmd_completion(struct mhi_controller *mhi_cntrl,
cmd_pkt = mhi_to_virtual(mhi_ring, ptr);

chan = MHI_TRE_GET_CMD_CHID(cmd_pkt);
mhi_chan = &mhi_cntrl->mhi_chan[chan];
write_lock_bh(&mhi_chan->lock);
mhi_chan->ccs = MHI_TRE_GET_EV_CODE(tre);
complete(&mhi_chan->completion);
write_unlock_bh(&mhi_chan->lock);

if (chan < mhi_cntrl->max_chan &&
mhi_cntrl->mhi_chan[chan].configured) {
mhi_chan = &mhi_cntrl->mhi_chan[chan];
write_lock_bh(&mhi_chan->lock);
mhi_chan->ccs = MHI_TRE_GET_EV_CODE(tre);
complete(&mhi_chan->completion);
write_unlock_bh(&mhi_chan->lock);
} else {
dev_err(&mhi_cntrl->mhi_dev->dev,
"Completion packet for invalid channel ID: %d\n", chan);
}

mhi_del_ring_element(mhi_cntrl, mhi_ring);
}
@@ -32,6 +32,8 @@
* @edl: emergency download mode firmware path (if any)
* @bar_num: PCI base address register to use for MHI MMIO register space
* @dma_data_width: DMA transfer word size (32 or 64 bits)
* @sideband_wake: Devices using dedicated sideband GPIO for wakeup instead
* of inband wake support (such as sdx24)
*/
struct mhi_pci_dev_info {
const struct mhi_controller_config *config;

@@ -40,6 +42,7 @@ struct mhi_pci_dev_info {
const char *edl;
unsigned int bar_num;
unsigned int dma_data_width;
bool sideband_wake;
};

#define MHI_CHANNEL_CONFIG_UL(ch_num, ch_name, el_count, ev_ring) \

@@ -72,6 +75,22 @@ struct mhi_pci_dev_info {
.doorbell_mode_switch = false, \
}

#define MHI_CHANNEL_CONFIG_DL_AUTOQUEUE(ch_num, ch_name, el_count, ev_ring) \
{ \
.num = ch_num, \
.name = ch_name, \
.num_elements = el_count, \
.event_ring = ev_ring, \
.dir = DMA_FROM_DEVICE, \
.ee_mask = BIT(MHI_EE_AMSS), \
.pollcfg = 0, \
.doorbell = MHI_DB_BRST_DISABLE, \
.lpm_notify = false, \
.offload_channel = false, \
.doorbell_mode_switch = false, \
.auto_queue = true, \
}

#define MHI_EVENT_CONFIG_CTRL(ev_ring, el_count) \
{ \
.num_elements = el_count, \

@@ -210,7 +229,7 @@ static const struct mhi_channel_config modem_qcom_v1_mhi_channels[] = {
MHI_CHANNEL_CONFIG_UL(14, "QMI", 4, 0),
MHI_CHANNEL_CONFIG_DL(15, "QMI", 4, 0),
MHI_CHANNEL_CONFIG_UL(20, "IPCR", 8, 0),
MHI_CHANNEL_CONFIG_DL(21, "IPCR", 8, 0),
MHI_CHANNEL_CONFIG_DL_AUTOQUEUE(21, "IPCR", 8, 0),
MHI_CHANNEL_CONFIG_UL_FP(34, "FIREHOSE", 32, 0),
MHI_CHANNEL_CONFIG_DL_FP(35, "FIREHOSE", 32, 0),
MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0", 128, 2),

@@ -242,7 +261,8 @@ static const struct mhi_pci_dev_info mhi_qcom_sdx65_info = {
.edl = "qcom/sdx65m/edl.mbn",
.config = &modem_qcom_v1_mhiv_config,
.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
.dma_data_width = 32
.dma_data_width = 32,
.sideband_wake = false,
};

static const struct mhi_pci_dev_info mhi_qcom_sdx55_info = {

@@ -251,7 +271,8 @@ static const struct mhi_pci_dev_info mhi_qcom_sdx55_info = {
.edl = "qcom/sdx55m/edl.mbn",
.config = &modem_qcom_v1_mhiv_config,
.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
.dma_data_width = 32
.dma_data_width = 32,
.sideband_wake = false,
};

static const struct mhi_pci_dev_info mhi_qcom_sdx24_info = {

@@ -259,7 +280,8 @@ static const struct mhi_pci_dev_info mhi_qcom_sdx24_info = {
.edl = "qcom/prog_firehose_sdx24.mbn",
.config = &modem_qcom_v1_mhiv_config,
.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
.dma_data_width = 32
.dma_data_width = 32,
.sideband_wake = true,
};

static const struct mhi_channel_config mhi_quectel_em1xx_channels[] = {

@@ -301,7 +323,8 @@ static const struct mhi_pci_dev_info mhi_quectel_em1xx_info = {
.edl = "qcom/prog_firehose_sdx24.mbn",
.config = &modem_quectel_em1xx_config,
.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
.dma_data_width = 32
.dma_data_width = 32,
.sideband_wake = true,
};

static const struct mhi_channel_config mhi_foxconn_sdx55_channels[] = {

@@ -339,7 +362,8 @@ static const struct mhi_pci_dev_info mhi_foxconn_sdx55_info = {
.edl = "qcom/sdx55m/edl.mbn",
.config = &modem_foxconn_sdx55_config,
.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
.dma_data_width = 32
.dma_data_width = 32,
.sideband_wake = false,
};

static const struct pci_device_id mhi_pci_id_table[] = {

@@ -640,9 +664,12 @@ static int mhi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
mhi_cntrl->status_cb = mhi_pci_status_cb;
mhi_cntrl->runtime_get = mhi_pci_runtime_get;
mhi_cntrl->runtime_put = mhi_pci_runtime_put;
mhi_cntrl->wake_get = mhi_pci_wake_get_nop;
mhi_cntrl->wake_put = mhi_pci_wake_put_nop;
mhi_cntrl->wake_toggle = mhi_pci_wake_toggle_nop;

if (info->sideband_wake) {
mhi_cntrl->wake_get = mhi_pci_wake_get_nop;
mhi_cntrl->wake_put = mhi_pci_wake_put_nop;
mhi_cntrl->wake_toggle = mhi_pci_wake_toggle_nop;
}

err = mhi_pci_claim(mhi_cntrl, info->bar_num, DMA_BIT_MASK(info->dma_data_width));
if (err)
@@ -139,6 +139,9 @@ int scmi_driver_register(struct scmi_driver *driver, struct module *owner,
{
int retval;

if (!driver->probe)
return -EINVAL;

retval = scmi_protocol_device_request(driver->id_table);
if (retval)
return retval;
@@ -12,52 +12,38 @@
#include <linux/efi.h>
#include <linux/pci.h>

struct acpi_hid_uid {
struct acpi_device_id hid[2];
char uid[11]; /* UINT_MAX + null byte */
};

static int __init match_acpi_dev(struct device *dev, const void *data)
{
struct acpi_hid_uid hid_uid = *(const struct acpi_hid_uid *)data;
struct acpi_device *adev = to_acpi_device(dev);

if (acpi_match_device_ids(adev, hid_uid.hid))
return 0;

if (adev->pnp.unique_id)
return !strcmp(adev->pnp.unique_id, hid_uid.uid);
else
return !strcmp("0", hid_uid.uid);
}

static long __init parse_acpi_path(const struct efi_dev_path *node,
struct device *parent, struct device **child)
{
struct acpi_hid_uid hid_uid = {};
char hid[ACPI_ID_LEN], uid[11]; /* UINT_MAX + null byte */
struct acpi_device *adev;
struct device *phys_dev;

if (node->header.length != 12)
return -EINVAL;

sprintf(hid_uid.hid[0].id, "%c%c%c%04X",
sprintf(hid, "%c%c%c%04X",
'A' + ((node->acpi.hid >> 10) & 0x1f) - 1,
'A' + ((node->acpi.hid >> 5) & 0x1f) - 1,
'A' + ((node->acpi.hid >> 0) & 0x1f) - 1,
node->acpi.hid >> 16);
sprintf(hid_uid.uid, "%u", node->acpi.uid);
sprintf(uid, "%u", node->acpi.uid);

*child = bus_find_device(&acpi_bus_type, NULL, &hid_uid,
match_acpi_dev);
if (!*child)
for_each_acpi_dev_match(adev, hid, NULL, -1) {
if (adev->pnp.unique_id && !strcmp(adev->pnp.unique_id, uid))
break;
if (!adev->pnp.unique_id && node->acpi.uid == 0)
break;
}
if (!adev)
return -ENODEV;

phys_dev = acpi_get_first_physical_node(to_acpi_device(*child));
phys_dev = acpi_get_first_physical_node(adev);
if (phys_dev) {
get_device(phys_dev);
put_device(*child);
*child = phys_dev;
}
*child = get_device(phys_dev);
acpi_dev_put(adev);
} else
*child = &adev->dev;

return 0;
}
@@ -896,6 +896,7 @@ static int __init efi_memreserve_map_root(void)
static int efi_mem_reserve_iomem(phys_addr_t addr, u64 size)
{
struct resource *res, *parent;
int ret;

res = kzalloc(sizeof(struct resource), GFP_ATOMIC);
if (!res)

@@ -908,7 +909,17 @@ static int efi_mem_reserve_iomem(phys_addr_t addr, u64 size)

/* we expect a conflict with a 'System RAM' region */
parent = request_resource_conflict(&iomem_resource, res);
return parent ? request_resource(parent, res) : 0;
ret = parent ? request_resource(parent, res) : 0;

/*
* Given that efi_mem_reserve_iomem() can be called at any
* time, only call memblock_reserve() if the architecture
* keeps the infrastructure around.
*/
if (IS_ENABLED(CONFIG_ARCH_KEEP_MEMBLOCK) && !ret)
memblock_reserve(addr, size);

return ret;
}

int __ref efi_mem_reserve_persistent(phys_addr_t addr, u64 size)
@@ -62,9 +62,11 @@ int __init efi_tpm_eventlog_init(void)
tbl_size = sizeof(*log_tbl) + log_tbl->size;
memblock_reserve(efi.tpm_log, tbl_size);

if (efi.tpm_final_log == EFI_INVALID_TABLE_ADDR ||
log_tbl->version != EFI_TCG2_EVENT_LOG_FORMAT_TCG_2) {
pr_warn(FW_BUG "TPM Final Events table missing or invalid\n");
if (efi.tpm_final_log == EFI_INVALID_TABLE_ADDR) {
pr_info("TPM Final Events table not present\n");
goto out;
} else if (log_tbl->version != EFI_TCG2_EVENT_LOG_FORMAT_TCG_2) {
pr_warn(FW_BUG "TPM Final Events table invalid\n");
goto out;
}
|
|||
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER7_SELECT, 0xf0f001ff, 0x00000000),
|
||||
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER8_SELECT, 0xf0f001ff, 0x00000000),
|
||||
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER9_SELECT, 0xf0f001ff, 0x00000000),
|
||||
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSX_DEBUG_1, 0x00010000, 0x00010020),
|
||||
SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfff7ffff, 0x01030000),
|
||||
SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0xffbfffff, 0x00a00000)
|
||||
};
|
||||
|
@ -3369,6 +3370,7 @@ static const struct soc15_reg_golden golden_settings_gc_10_3_vangogh[] =
|
|||
SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_2, 0xffffffbf, 0x00000020),
|
||||
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1_Vangogh, 0xffffffff, 0x00070103),
|
||||
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQG_CONFIG, 0x000017ff, 0x00001000),
|
||||
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSX_DEBUG_1, 0x00010000, 0x00010020),
|
||||
SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfff7ffff, 0x01030000),
|
||||
SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0xffffffff, 0x00400000),
|
||||
SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000000ff),
|
||||
|
@ -3411,6 +3413,7 @@ static const struct soc15_reg_golden golden_settings_gc_10_3_4[] =
|
|||
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER7_SELECT, 0xf0f001ff, 0x00000000),
|
||||
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER8_SELECT, 0xf0f001ff, 0x00000000),
|
||||
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER9_SELECT, 0xf0f001ff, 0x00000000),
|
||||
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSX_DEBUG_1, 0x00010000, 0x00010020),
|
||||
SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0x01030000, 0x01030000),
|
||||
SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0x03a00000, 0x00a00000),
|
||||
SOC15_REG_GOLDEN_VALUE(GC, 0, mmLDS_CONFIG, 0x00000020, 0x00000020)
|
||||
|
|
|
@ -827,6 +827,9 @@ long drm_ioctl(struct file *filp,
|
|||
if (drm_dev_is_unplugged(dev))
|
||||
return -ENODEV;
|
||||
|
||||
if (DRM_IOCTL_TYPE(cmd) != DRM_IOCTL_BASE)
|
||||
return -ENOTTY;
|
||||
|
||||
is_driver_ioctl = nr >= DRM_COMMAND_BASE && nr < DRM_COMMAND_END;
|
||||
|
||||
if (is_driver_ioctl) {
|
||||
|
|
|
@ -1977,6 +1977,21 @@ static int elsp_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
|
|||
if (drm_WARN_ON(&i915->drm, !engine))
|
||||
return -EINVAL;
|
||||
|
||||
/*
|
||||
* Due to d3_entered is used to indicate skipping PPGTT invalidation on
|
||||
* vGPU reset, it's set on D0->D3 on PCI config write, and cleared after
|
||||
* vGPU reset if in resuming.
|
||||
* In S0ix exit, the device power state also transite from D3 to D0 as
|
||||
* S3 resume, but no vGPU reset (triggered by QEMU devic model). After
|
||||
* S0ix exit, all engines continue to work. However the d3_entered
|
||||
* remains set which will break next vGPU reset logic (miss the expected
|
||||
* PPGTT invalidation).
|
||||
* Engines can only work in D0. Thus the 1st elsp write gives GVT a
|
||||
* chance to clear d3_entered.
|
||||
*/
|
||||
if (vgpu->d3_entered)
|
||||
vgpu->d3_entered = false;
|
||||
|
||||
execlist = &vgpu->submission.execlist[engine->id];
|
||||
|
||||
execlist->elsp_dwords.data[3 - execlist->elsp_dwords.index] = data;
|
||||
|
|
|
@ -447,7 +447,6 @@ static int rpi_touchscreen_remove(struct i2c_client *i2c)
|
|||
drm_panel_remove(&ts->base);
|
||||
|
||||
mipi_dsi_device_unregister(ts->dsi);
|
||||
kfree(ts->dsi);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@ -100,6 +100,8 @@ static int ttm_global_init(void)
|
|||
debugfs_create_atomic_t("buffer_objects", 0444, ttm_debugfs_root,
|
||||
&glob->bo_count);
|
||||
out:
|
||||
if (ret)
|
||||
--ttm_glob_use_count;
|
||||
mutex_unlock(&ttm_global_mutex);
|
||||
return ret;
|
||||
}
|
||||
|
|
|
@@ -1690,38 +1690,46 @@ static int vc4_hdmi_cec_init(struct vc4_hdmi *vc4_hdmi)
vc4_hdmi_cec_update_clk_div(vc4_hdmi);

if (vc4_hdmi->variant->external_irq_controller) {
ret = devm_request_threaded_irq(&pdev->dev,
platform_get_irq_byname(pdev, "cec-rx"),
vc4_cec_irq_handler_rx_bare,
vc4_cec_irq_handler_rx_thread, 0,
"vc4 hdmi cec rx", vc4_hdmi);
ret = request_threaded_irq(platform_get_irq_byname(pdev, "cec-rx"),
vc4_cec_irq_handler_rx_bare,
vc4_cec_irq_handler_rx_thread, 0,
"vc4 hdmi cec rx", vc4_hdmi);
if (ret)
goto err_delete_cec_adap;

ret = devm_request_threaded_irq(&pdev->dev,
platform_get_irq_byname(pdev, "cec-tx"),
vc4_cec_irq_handler_tx_bare,
vc4_cec_irq_handler_tx_thread, 0,
"vc4 hdmi cec tx", vc4_hdmi);
ret = request_threaded_irq(platform_get_irq_byname(pdev, "cec-tx"),
vc4_cec_irq_handler_tx_bare,
vc4_cec_irq_handler_tx_thread, 0,
"vc4 hdmi cec tx", vc4_hdmi);
if (ret)
goto err_delete_cec_adap;
goto err_remove_cec_rx_handler;
} else {
HDMI_WRITE(HDMI_CEC_CPU_MASK_SET, 0xffffffff);

ret = devm_request_threaded_irq(&pdev->dev, platform_get_irq(pdev, 0),
vc4_cec_irq_handler,
vc4_cec_irq_handler_thread, 0,
"vc4 hdmi cec", vc4_hdmi);
ret = request_threaded_irq(platform_get_irq(pdev, 0),
vc4_cec_irq_handler,
vc4_cec_irq_handler_thread, 0,
"vc4 hdmi cec", vc4_hdmi);
if (ret)
goto err_delete_cec_adap;
}

ret = cec_register_adapter(vc4_hdmi->cec_adap, &pdev->dev);
if (ret < 0)
goto err_delete_cec_adap;
goto err_remove_handlers;

return 0;

err_remove_handlers:
if (vc4_hdmi->variant->external_irq_controller)
free_irq(platform_get_irq_byname(pdev, "cec-tx"), vc4_hdmi);
else
free_irq(platform_get_irq(pdev, 0), vc4_hdmi);

err_remove_cec_rx_handler:
if (vc4_hdmi->variant->external_irq_controller)
free_irq(platform_get_irq_byname(pdev, "cec-rx"), vc4_hdmi);

err_delete_cec_adap:
cec_delete_adapter(vc4_hdmi->cec_adap);

@@ -1730,6 +1738,15 @@ err_delete_cec_adap:

static void vc4_hdmi_cec_exit(struct vc4_hdmi *vc4_hdmi)
{
struct platform_device *pdev = vc4_hdmi->pdev;

if (vc4_hdmi->variant->external_irq_controller) {
free_irq(platform_get_irq_byname(pdev, "cec-rx"), vc4_hdmi);
free_irq(platform_get_irq_byname(pdev, "cec-tx"), vc4_hdmi);
} else {
free_irq(platform_get_irq(pdev, 0), vc4_hdmi);
}

cec_unregister_adapter(vc4_hdmi->cec_adap);
}
#else
@@ -354,7 +354,6 @@ static void vmw_otable_batch_takedown(struct vmw_private *dev_priv,
ttm_bo_unpin(bo);
ttm_bo_unreserve(bo);

ttm_bo_unpin(batch->otable_bo);
ttm_bo_put(batch->otable_bo);
batch->otable_bo = NULL;
}
@@ -635,8 +635,8 @@ static irqreturn_t mpc_i2c_isr(int irq, void *dev_id)

status = readb(i2c->base + MPC_I2C_SR);
if (status & CSR_MIF) {
/* Read again to allow register to stabilise */
status = readb(i2c->base + MPC_I2C_SR);
/* Wait up to 100us for transfer to properly complete */
readb_poll_timeout(i2c->base + MPC_I2C_SR, status, !(status & CSR_MCF), 0, 100);
writeb(0, i2c->base + MPC_I2C_SR);
mpc_i2c_do_intr(i2c, status);
return IRQ_HANDLED;
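A hedged sketch of the <linux/iopoll.h> helper the fix switches to: it re-reads the register into 'val' until the condition holds or timeout_us elapses, returning 0 or -ETIMEDOUT; with delay_us == 0 it busy-polls, which is what keeps it usable from this interrupt handler (the helper name below is invented):

#include <linux/iopoll.h>

/* wait up to 100us for 'bit' to clear in a status register */
static int demo_wait_bit_clear(void __iomem *reg, u8 bit)
{
	u8 val;

	return readb_poll_timeout(reg, val, !(val & bit), 0, 100);
}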
@@ -173,10 +173,8 @@ static int cio2_bridge_connect_sensor(const struct cio2_sensor_config *cfg,
int ret;

for_each_acpi_dev_match(adev, cfg->hid, NULL, -1) {
if (!adev->status.enabled) {
acpi_dev_put(adev);
if (!adev->status.enabled)
continue;
}

if (bridge->n_sensors >= CIO2_NUM_PORTS) {
acpi_dev_put(adev);

@@ -185,7 +183,6 @@ static int cio2_bridge_connect_sensor(const struct cio2_sensor_config *cfg,
}

sensor = &bridge->sensors[bridge->n_sensors];
sensor->adev = adev;
strscpy(sensor->name, cfg->hid, sizeof(sensor->name));

ret = cio2_bridge_read_acpi_buffer(adev, "SSDB",

@@ -215,6 +212,7 @@ static int cio2_bridge_connect_sensor(const struct cio2_sensor_config *cfg,
goto err_free_swnodes;
}

sensor->adev = acpi_dev_get(adev);
adev->fwnode.secondary = fwnode;

dev_info(&cio2->dev, "Found supported sensor %s\n",
@@ -385,7 +385,7 @@ static int ngene_command_config_free_buf(struct ngene *dev, u8 *config)

com.cmd.hdr.Opcode = CMD_CONFIGURE_FREE_BUFFER;
com.cmd.hdr.Length = 6;
memcpy(&com.cmd.ConfigureBuffers.config, config, 6);
memcpy(&com.cmd.ConfigureFreeBuffers.config, config, 6);
com.in_len = 6;
com.out_len = 0;
@@ -407,12 +407,14 @@ enum _BUFFER_CONFIGS {

struct FW_CONFIGURE_FREE_BUFFERS {
struct FW_HEADER hdr;
u8 UVI1_BufferLength;
u8 UVI2_BufferLength;
u8 TVO_BufferLength;
u8 AUD1_BufferLength;
u8 AUD2_BufferLength;
u8 TVA_BufferLength;
struct {
u8 UVI1_BufferLength;
u8 UVI2_BufferLength;
u8 TVO_BufferLength;
u8 AUD1_BufferLength;
u8 AUD2_BufferLength;
u8 TVA_BufferLength;
} __packed config;
} __attribute__ ((__packed__));

struct FW_CONFIGURE_UART {
|
|||
}
|
||||
|
||||
/*
|
||||
* If the 'label' property is not present for the AT24 EEPROM,
|
||||
* then nvmem_config.id is initialised to NVMEM_DEVID_AUTO,
|
||||
* and this will append the 'devid' to the name of the NVMEM
|
||||
* device. This is purely legacy and the AT24 driver has always
|
||||
* defaulted to this. However, if the 'label' property is
|
||||
* present then this means that the name is specified by the
|
||||
* firmware and this name should be used verbatim and so it is
|
||||
* not necessary to append the 'devid'.
|
||||
* We initialize nvmem_config.id to NVMEM_DEVID_AUTO even if the
|
||||
* label property is set as some platform can have multiple eeproms
|
||||
* with same label and we can not register each of those with same
|
||||
* label. Failing to register those eeproms trigger cascade failure
|
||||
* on such platform.
|
||||
*/
|
||||
nvmem_config.id = NVMEM_DEVID_AUTO;
|
||||
|
||||
if (device_property_present(dev, "label")) {
|
||||
nvmem_config.id = NVMEM_DEVID_NONE;
|
||||
err = device_property_read_string(dev, "label",
|
||||
&nvmem_config.name);
|
||||
if (err)
|
||||
return err;
|
||||
} else {
|
||||
nvmem_config.id = NVMEM_DEVID_AUTO;
|
||||
nvmem_config.name = dev_name(dev);
|
||||
}
|
||||
|
||||
|
|
|
@ -75,7 +75,8 @@ static void mmc_host_classdev_release(struct device *dev)
|
|||
{
|
||||
struct mmc_host *host = cls_dev_to_mmc_host(dev);
|
||||
wakeup_source_unregister(host->ws);
|
||||
ida_simple_remove(&mmc_host_ida, host->index);
|
||||
if (of_alias_get_id(host->parent->of_node, "mmc") < 0)
|
||||
ida_simple_remove(&mmc_host_ida, host->index);
|
||||
kfree(host);
|
||||
}
|
||||
|
||||
|
@ -499,7 +500,7 @@ static int mmc_first_nonreserved_index(void)
|
|||
*/
|
||||
struct mmc_host *mmc_alloc_host(int extra, struct device *dev)
|
||||
{
|
||||
int err;
|
||||
int index;
|
||||
struct mmc_host *host;
|
||||
int alias_id, min_idx, max_idx;
|
||||
|
||||
|
@ -512,20 +513,19 @@ struct mmc_host *mmc_alloc_host(int extra, struct device *dev)
|
|||
|
||||
alias_id = of_alias_get_id(dev->of_node, "mmc");
|
||||
if (alias_id >= 0) {
|
||||
min_idx = alias_id;
|
||||
max_idx = alias_id + 1;
|
||||
index = alias_id;
|
||||
} else {
|
||||
min_idx = mmc_first_nonreserved_index();
|
||||
max_idx = 0;
|
||||
|
||||
index = ida_simple_get(&mmc_host_ida, min_idx, max_idx, GFP_KERNEL);
|
||||
if (index < 0) {
|
||||
kfree(host);
|
||||
return NULL;
|
||||
}
|
||||
}
|
||||
|
||||
err = ida_simple_get(&mmc_host_ida, min_idx, max_idx, GFP_KERNEL);
|
||||
if (err < 0) {
|
||||
kfree(host);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
host->index = err;
|
||||
host->index = index;
|
||||
|
||||
dev_set_name(&host->class_dev, "mmc%d", host->index);
|
||||
host->ws = wakeup_source_register(NULL, dev_name(&host->class_dev));
|
||||
|
|
|
@ -401,24 +401,85 @@ static int bond_vlan_rx_kill_vid(struct net_device *bond_dev,
|
|||
static int bond_ipsec_add_sa(struct xfrm_state *xs)
|
||||
{
|
||||
struct net_device *bond_dev = xs->xso.dev;
|
||||
struct bond_ipsec *ipsec;
|
||||
struct bonding *bond;
|
||||
struct slave *slave;
|
||||
int err;
|
||||
|
||||
if (!bond_dev)
|
||||
return -EINVAL;
|
||||
|
||||
rcu_read_lock();
|
||||
bond = netdev_priv(bond_dev);
|
||||
slave = rcu_dereference(bond->curr_active_slave);
|
||||
xs->xso.real_dev = slave->dev;
|
||||
bond->xs = xs;
|
||||
if (!slave) {
|
||||
rcu_read_unlock();
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
if (!(slave->dev->xfrmdev_ops
|
||||
&& slave->dev->xfrmdev_ops->xdo_dev_state_add)) {
|
||||
if (!slave->dev->xfrmdev_ops ||
|
||||
!slave->dev->xfrmdev_ops->xdo_dev_state_add ||
|
||||
netif_is_bond_master(slave->dev)) {
|
||||
slave_warn(bond_dev, slave->dev, "Slave does not support ipsec offload\n");
|
||||
rcu_read_unlock();
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
return slave->dev->xfrmdev_ops->xdo_dev_state_add(xs);
|
||||
ipsec = kmalloc(sizeof(*ipsec), GFP_ATOMIC);
|
||||
if (!ipsec) {
|
||||
rcu_read_unlock();
|
||||
return -ENOMEM;
|
||||
}
|
||||
xs->xso.real_dev = slave->dev;
|
||||
|
||||
err = slave->dev->xfrmdev_ops->xdo_dev_state_add(xs);
|
||||
if (!err) {
|
||||
ipsec->xs = xs;
|
||||
INIT_LIST_HEAD(&ipsec->list);
|
||||
spin_lock_bh(&bond->ipsec_lock);
|
||||
list_add(&ipsec->list, &bond->ipsec_list);
|
||||
spin_unlock_bh(&bond->ipsec_lock);
|
||||
} else {
|
||||
kfree(ipsec);
|
||||
}
|
||||
rcu_read_unlock();
|
||||
return err;
|
||||
}
|
||||
|
||||
static void bond_ipsec_add_sa_all(struct bonding *bond)
|
||||
{
|
||||
struct net_device *bond_dev = bond->dev;
|
||||
struct bond_ipsec *ipsec;
|
||||
struct slave *slave;
|
||||
|
||||
rcu_read_lock();
|
||||
slave = rcu_dereference(bond->curr_active_slave);
|
||||
if (!slave)
|
||||
goto out;
|
||||
|
||||
if (!slave->dev->xfrmdev_ops ||
|
||||
!slave->dev->xfrmdev_ops->xdo_dev_state_add ||
|
||||
netif_is_bond_master(slave->dev)) {
|
||||
spin_lock_bh(&bond->ipsec_lock);
|
||||
if (!list_empty(&bond->ipsec_list))
|
||||
slave_warn(bond_dev, slave->dev,
|
||||
"%s: no slave xdo_dev_state_add\n",
|
||||
__func__);
|
||||
spin_unlock_bh(&bond->ipsec_lock);
|
||||
goto out;
|
||||
}
|
||||
|
||||
spin_lock_bh(&bond->ipsec_lock);
|
||||
list_for_each_entry(ipsec, &bond->ipsec_list, list) {
|
||||
ipsec->xs->xso.real_dev = slave->dev;
|
||||
if (slave->dev->xfrmdev_ops->xdo_dev_state_add(ipsec->xs)) {
|
||||
slave_warn(bond_dev, slave->dev, "%s: failed to add SA\n", __func__);
|
||||
ipsec->xs->xso.real_dev = NULL;
|
||||
}
|
||||
}
|
||||
spin_unlock_bh(&bond->ipsec_lock);
|
||||
out:
|
||||
rcu_read_unlock();
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -428,27 +489,77 @@ static int bond_ipsec_add_sa(struct xfrm_state *xs)
|
|||
static void bond_ipsec_del_sa(struct xfrm_state *xs)
|
||||
{
|
||||
struct net_device *bond_dev = xs->xso.dev;
|
||||
struct bond_ipsec *ipsec;
|
||||
struct bonding *bond;
|
||||
struct slave *slave;
|
||||
|
||||
if (!bond_dev)
|
||||
return;
|
||||
|
||||
rcu_read_lock();
|
||||
bond = netdev_priv(bond_dev);
|
||||
slave = rcu_dereference(bond->curr_active_slave);
|
||||
|
||||
if (!slave)
|
||||
return;
|
||||
goto out;
|
||||
|
||||
xs->xso.real_dev = slave->dev;
|
||||
if (!xs->xso.real_dev)
|
||||
goto out;
|
||||
|
||||
if (!(slave->dev->xfrmdev_ops
|
||||
&& slave->dev->xfrmdev_ops->xdo_dev_state_delete)) {
|
||||
WARN_ON(xs->xso.real_dev != slave->dev);
|
||||
|
||||
if (!slave->dev->xfrmdev_ops ||
|
||||
!slave->dev->xfrmdev_ops->xdo_dev_state_delete ||
|
||||
netif_is_bond_master(slave->dev)) {
|
||||
slave_warn(bond_dev, slave->dev, "%s: no slave xdo_dev_state_delete\n", __func__);
|
||||
return;
|
||||
goto out;
|
||||
}
|
||||
|
||||
slave->dev->xfrmdev_ops->xdo_dev_state_delete(xs);
|
||||
out:
|
||||
spin_lock_bh(&bond->ipsec_lock);
|
||||
list_for_each_entry(ipsec, &bond->ipsec_list, list) {
|
||||
if (ipsec->xs == xs) {
|
||||
list_del(&ipsec->list);
|
||||
kfree(ipsec);
|
||||
break;
|
||||
}
|
||||
}
|
||||
spin_unlock_bh(&bond->ipsec_lock);
|
||||
rcu_read_unlock();
|
||||
}
|
||||
|
||||
static void bond_ipsec_del_sa_all(struct bonding *bond)
|
||||
{
|
||||
struct net_device *bond_dev = bond->dev;
|
||||
struct bond_ipsec *ipsec;
|
||||
struct slave *slave;
|
||||
|
||||
rcu_read_lock();
|
||||
slave = rcu_dereference(bond->curr_active_slave);
|
||||
if (!slave) {
|
||||
rcu_read_unlock();
|
||||
return;
|
||||
}
|
||||
|
||||
spin_lock_bh(&bond->ipsec_lock);
|
||||
list_for_each_entry(ipsec, &bond->ipsec_list, list) {
|
||||
if (!ipsec->xs->xso.real_dev)
|
||||
continue;
|
||||
|
||||
if (!slave->dev->xfrmdev_ops ||
|
||||
!slave->dev->xfrmdev_ops->xdo_dev_state_delete ||
|
||||
netif_is_bond_master(slave->dev)) {
|
||||
slave_warn(bond_dev, slave->dev,
|
||||
"%s: no slave xdo_dev_state_delete\n",
|
||||
__func__);
|
||||
} else {
|
||||
slave->dev->xfrmdev_ops->xdo_dev_state_delete(ipsec->xs);
|
||||
}
|
||||
ipsec->xs->xso.real_dev = NULL;
|
||||
}
|
||||
spin_unlock_bh(&bond->ipsec_lock);
|
||||
rcu_read_unlock();
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -459,21 +570,37 @@ static void bond_ipsec_del_sa(struct xfrm_state *xs)
|
|||
static bool bond_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *xs)
|
||||
{
|
||||
struct net_device *bond_dev = xs->xso.dev;
|
||||
struct bonding *bond = netdev_priv(bond_dev);
|
||||
struct slave *curr_active = rcu_dereference(bond->curr_active_slave);
|
||||
struct net_device *slave_dev = curr_active->dev;
|
||||
struct net_device *real_dev;
|
||||
struct slave *curr_active;
|
||||
struct bonding *bond;
|
||||
int err;
|
||||
|
||||
if (BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP)
|
||||
return true;
|
||||
bond = netdev_priv(bond_dev);
|
||||
rcu_read_lock();
|
||||
curr_active = rcu_dereference(bond->curr_active_slave);
|
||||
real_dev = curr_active->dev;
|
||||
|
||||
if (!(slave_dev->xfrmdev_ops
|
||||
&& slave_dev->xfrmdev_ops->xdo_dev_offload_ok)) {
|
||||
slave_warn(bond_dev, slave_dev, "%s: no slave xdo_dev_offload_ok\n", __func__);
|
||||
return false;
|
||||
if (BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) {
|
||||
err = false;
|
||||
goto out;
|
||||
}
|
||||
|
||||
xs->xso.real_dev = slave_dev;
|
||||
return slave_dev->xfrmdev_ops->xdo_dev_offload_ok(skb, xs);
|
||||
if (!xs->xso.real_dev) {
|
||||
err = false;
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (!real_dev->xfrmdev_ops ||
|
||||
!real_dev->xfrmdev_ops->xdo_dev_offload_ok ||
|
||||
netif_is_bond_master(real_dev)) {
|
||||
err = false;
|
||||
goto out;
|
||||
}
|
||||
|
||||
err = real_dev->xfrmdev_ops->xdo_dev_offload_ok(skb, xs);
|
||||
out:
|
||||
rcu_read_unlock();
|
||||
return err;
|
||||
}
|
||||
|
||||
static const struct xfrmdev_ops bond_xfrmdev_ops = {
|
||||
|
@ -990,8 +1117,7 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
|
|||
return;
|
||||
|
||||
#ifdef CONFIG_XFRM_OFFLOAD
|
||||
if (old_active && bond->xs)
|
||||
bond_ipsec_del_sa(bond->xs);
|
||||
bond_ipsec_del_sa_all(bond);
|
||||
#endif /* CONFIG_XFRM_OFFLOAD */
|
||||
|
||||
if (new_active) {
|
||||
|
@ -1067,10 +1193,7 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
|
|||
}
|
||||
|
||||
#ifdef CONFIG_XFRM_OFFLOAD
|
||||
if (new_active && bond->xs) {
|
||||
xfrm_dev_state_flush(dev_net(bond->dev), bond->dev, true);
|
||||
bond_ipsec_add_sa(bond->xs);
|
||||
}
|
||||
bond_ipsec_add_sa_all(bond);
|
||||
#endif /* CONFIG_XFRM_OFFLOAD */
|
||||
|
||||
/* resend IGMP joins since active slave has changed or
|
||||
|
@ -3319,6 +3442,9 @@ static int bond_master_netdev_event(unsigned long event,
|
|||
return bond_event_changename(event_bond);
|
||||
case NETDEV_UNREGISTER:
|
||||
bond_remove_proc_entry(event_bond);
|
||||
#ifdef CONFIG_XFRM_OFFLOAD
|
||||
xfrm_dev_state_flush(dev_net(bond_dev), bond_dev, true);
|
||||
#endif /* CONFIG_XFRM_OFFLOAD */
|
||||
break;
|
||||
case NETDEV_REGISTER:
|
||||
bond_create_proc_entry(event_bond);
|
||||
|
@ -4882,7 +5008,8 @@ void bond_setup(struct net_device *bond_dev)
|
|||
#ifdef CONFIG_XFRM_OFFLOAD
|
||||
/* set up xfrm device ops (only supported in active-backup right now) */
|
||||
bond_dev->xfrmdev_ops = &bond_xfrmdev_ops;
|
||||
bond->xs = NULL;
|
||||
INIT_LIST_HEAD(&bond->ipsec_list);
|
||||
spin_lock_init(&bond->ipsec_lock);
|
||||
#endif /* CONFIG_XFRM_OFFLOAD */
|
||||
|
||||
/* don't acquire bond device's netif_tx_lock when transmitting */
|
||||
|
|
|
@ -12,7 +12,7 @@ config NET_DSA_MV88E6XXX
|
|||
config NET_DSA_MV88E6XXX_PTP
|
||||
bool "PTP support for Marvell 88E6xxx"
|
||||
default n
|
||||
depends on PTP_1588_CLOCK
|
||||
depends on NET_DSA_MV88E6XXX && PTP_1588_CLOCK
|
||||
help
|
||||
Say Y to enable PTP hardware timestamping on Marvell 88E6xxx switch
|
||||
chips that support it.
|
||||
|
|
|
@@ -378,6 +378,12 @@ static int sja1105_init_static_vlan(struct sja1105_private *priv)
if (dsa_is_cpu_port(ds, port))
v->pvid = true;
list_add(&v->list, &priv->dsa_8021q_vlans);

v = kmemdup(v, sizeof(*v), GFP_KERNEL);
if (!v)
return -ENOMEM;

list_add(&v->list, &priv->bridge_vlans);
}

((struct sja1105_vlan_lookup_entry *)table->entries)[0] = pvid;
|
|||
|
||||
if ((tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) &&
|
||||
(skb->dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)) {
|
||||
u16 vlan_proto = tpa_info->metadata >>
|
||||
RX_CMP_FLAGS2_METADATA_TPID_SFT;
|
||||
__be16 vlan_proto = htons(tpa_info->metadata >>
|
||||
RX_CMP_FLAGS2_METADATA_TPID_SFT);
|
||||
u16 vtag = tpa_info->metadata & RX_CMP_FLAGS2_METADATA_TCI_MASK;
|
||||
|
||||
__vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag);
|
||||
if (eth_type_vlan(vlan_proto)) {
|
||||
__vlan_hwaccel_put_tag(skb, vlan_proto, vtag);
|
||||
} else {
|
||||
dev_kfree_skb(skb);
|
||||
return NULL;
|
||||
}
|
||||
}
|
||||
|
||||
skb_checksum_none_assert(skb);
|
||||
|
@ -1865,9 +1870,15 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
|
|||
(skb->dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)) {
|
||||
u32 meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data);
|
||||
u16 vtag = meta_data & RX_CMP_FLAGS2_METADATA_TCI_MASK;
|
||||
u16 vlan_proto = meta_data >> RX_CMP_FLAGS2_METADATA_TPID_SFT;
|
||||
__be16 vlan_proto = htons(meta_data >>
|
||||
RX_CMP_FLAGS2_METADATA_TPID_SFT);
|
||||
|
||||
__vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag);
|
||||
if (eth_type_vlan(vlan_proto)) {
|
||||
__vlan_hwaccel_put_tag(skb, vlan_proto, vtag);
|
||||
} else {
|
||||
dev_kfree_skb(skb);
|
||||
goto next_rx;
|
||||
}
|
||||
}
|
||||
|
||||
skb_checksum_none_assert(skb);
|
||||
|
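The guard added in both hunks is eth_type_vlan() from <linux/if_vlan.h>, which accepts only the two valid VLAN TPIDs; a sketch (wrapper name invented):

#include <linux/if_vlan.h>

/* true only for htons(ETH_P_8021Q) or htons(ETH_P_8021AD); anything else
 * coming out of the completion metadata means a malformed or unexpected
 * TPID, and the patch drops such packets instead of tagging them. */
static bool demo_tpid_is_sane(__be16 proto)
{
	return eth_type_vlan(proto);
}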
@ -10093,6 +10104,12 @@ int bnxt_half_open_nic(struct bnxt *bp)
|
|||
{
|
||||
int rc = 0;
|
||||
|
||||
if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
|
||||
netdev_err(bp->dev, "A previous firmware reset has not completed, aborting half open\n");
|
||||
rc = -ENODEV;
|
||||
goto half_open_err;
|
||||
}
|
||||
|
||||
rc = bnxt_alloc_mem(bp, false);
|
||||
if (rc) {
|
||||
netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
|
||||
|
@ -11849,10 +11866,21 @@ static bool bnxt_fw_reset_timeout(struct bnxt *bp)
|
|||
(bp->fw_reset_max_dsecs * HZ / 10));
|
||||
}
|
||||
|
||||
static void bnxt_fw_reset_abort(struct bnxt *bp, int rc)
|
||||
{
|
||||
clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
|
||||
if (bp->fw_reset_state != BNXT_FW_RESET_STATE_POLL_VF) {
|
||||
bnxt_ulp_start(bp, rc);
|
||||
bnxt_dl_health_status_update(bp, false);
|
||||
}
|
||||
bp->fw_reset_state = 0;
|
||||
dev_close(bp->dev);
|
||||
}
|
||||
|
||||
static void bnxt_fw_reset_task(struct work_struct *work)
|
||||
{
|
||||
struct bnxt *bp = container_of(work, struct bnxt, fw_reset_task.work);
|
||||
int rc;
|
||||
int rc = 0;
|
||||
|
||||
if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
|
||||
netdev_err(bp->dev, "bnxt_fw_reset_task() called when not in fw reset mode!\n");
|
||||
|
@ -11882,6 +11910,11 @@ static void bnxt_fw_reset_task(struct work_struct *work)
|
|||
}
|
||||
bp->fw_reset_timestamp = jiffies;
|
||||
rtnl_lock();
|
||||
if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
|
||||
bnxt_fw_reset_abort(bp, rc);
|
||||
rtnl_unlock();
|
||||
return;
|
||||
}
|
||||
bnxt_fw_reset_close(bp);
|
||||
if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
|
||||
bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW_DOWN;
|
||||
|
@ -11929,6 +11962,7 @@ static void bnxt_fw_reset_task(struct work_struct *work)
|
|||
if (val == 0xffff) {
|
||||
if (bnxt_fw_reset_timeout(bp)) {
|
||||
netdev_err(bp->dev, "Firmware reset aborted, PCI config space invalid\n");
|
||||
rc = -ETIMEDOUT;
|
||||
goto fw_reset_abort;
|
||||
}
|
||||
bnxt_queue_fw_reset_work(bp, HZ / 1000);
|
||||
|
@ -11938,6 +11972,7 @@ static void bnxt_fw_reset_task(struct work_struct *work)
|
|||
clear_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
|
||||
if (pci_enable_device(bp->pdev)) {
|
||||
netdev_err(bp->dev, "Cannot re-enable PCI device\n");
|
||||
rc = -ENODEV;
|
||||
goto fw_reset_abort;
|
||||
}
|
||||
pci_set_master(bp->pdev);
|
||||
|
@ -11964,9 +11999,10 @@ static void bnxt_fw_reset_task(struct work_struct *work)
|
|||
}
|
||||
rc = bnxt_open(bp->dev);
|
||||
if (rc) {
|
||||
netdev_err(bp->dev, "bnxt_open_nic() failed\n");
|
||||
clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
|
||||
dev_close(bp->dev);
|
||||
netdev_err(bp->dev, "bnxt_open() failed during FW reset\n");
|
||||
bnxt_fw_reset_abort(bp, rc);
|
||||
rtnl_unlock();
|
||||
return;
|
||||
}
|
||||
|
||||
bp->fw_reset_state = 0;
|
||||
|
@ -11993,12 +12029,8 @@ fw_reset_abort_status:
|
|||
netdev_err(bp->dev, "fw_health_status 0x%x\n", sts);
|
||||
}
|
||||
fw_reset_abort:
|
||||
clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
|
||||
if (bp->fw_reset_state != BNXT_FW_RESET_STATE_POLL_VF)
|
||||
bnxt_dl_health_status_update(bp, false);
|
||||
bp->fw_reset_state = 0;
|
||||
rtnl_lock();
|
||||
dev_close(bp->dev);
|
||||
bnxt_fw_reset_abort(bp, rc);
|
||||
rtnl_unlock();
|
||||
}
|
||||
|
||||
|
@ -13315,7 +13347,8 @@ static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
|
|||
if (netif_running(netdev))
|
||||
bnxt_close(netdev);
|
||||
|
||||
pci_disable_device(pdev);
|
||||
if (pci_is_enabled(pdev))
|
||||
pci_disable_device(pdev);
|
||||
bnxt_free_ctx_mem(bp);
|
||||
kfree(bp->ctx);
|
||||
bp->ctx = NULL;
|
||||
|
|
|
@@ -479,16 +479,17 @@ struct bnxt_en_dev *bnxt_ulp_probe(struct net_device *dev)
 		if (!edev)
 			return ERR_PTR(-ENOMEM);
 		edev->en_ops = &bnxt_en_ops_tbl;
-		if (bp->flags & BNXT_FLAG_ROCEV1_CAP)
-			edev->flags |= BNXT_EN_FLAG_ROCEV1_CAP;
-		if (bp->flags & BNXT_FLAG_ROCEV2_CAP)
-			edev->flags |= BNXT_EN_FLAG_ROCEV2_CAP;
 		edev->net = dev;
 		edev->pdev = bp->pdev;
 		edev->l2_db_size = bp->db_size;
 		edev->l2_db_size_nc = bp->db_size;
 		bp->edev = edev;
 	}
+	edev->flags &= ~BNXT_EN_FLAG_ROCE_CAP;
+	if (bp->flags & BNXT_FLAG_ROCEV1_CAP)
+		edev->flags |= BNXT_EN_FLAG_ROCEV1_CAP;
+	if (bp->flags & BNXT_FLAG_ROCEV2_CAP)
+		edev->flags |= BNXT_EN_FLAG_ROCEV2_CAP;
 	return bp->edev;
 }
 EXPORT_SYMBOL(bnxt_ulp_probe);
@@ -420,7 +420,7 @@ static int cn23xx_pf_setup_global_input_regs(struct octeon_device *oct)
 	 * bits 32:47 indicate the PVF num.
 	 */
 	for (q_no = 0; q_no < ern; q_no++) {
-		reg_val = oct->pcie_port << CN23XX_PKT_INPUT_CTL_MAC_NUM_POS;
+		reg_val = (u64)oct->pcie_port << CN23XX_PKT_INPUT_CTL_MAC_NUM_POS;
 
 		/* for VF assigned queues. */
 		if (q_no < oct->sriov_info.pf_srn) {
@@ -2643,6 +2643,9 @@ static void detach_ulds(struct adapter *adap)
 {
 	unsigned int i;
 
+	if (!is_uld(adap))
+		return;
+
 	mutex_lock(&uld_mutex);
 	list_del(&adap->list_node);
 
@@ -7141,10 +7144,13 @@ static void remove_one(struct pci_dev *pdev)
 		 */
 		destroy_workqueue(adapter->workq);
 
-		if (is_uld(adapter)) {
-			detach_ulds(adapter);
-			t4_uld_clean_up(adapter);
-		}
+		detach_ulds(adapter);
+
+		for_each_port(adapter, i)
+			if (adapter->port[i]->reg_state == NETREG_REGISTERED)
+				unregister_netdev(adapter->port[i]);
+
+		t4_uld_clean_up(adapter);
 
 		adap_free_hma_mem(adapter);
 
@@ -7152,10 +7158,6 @@ static void remove_one(struct pci_dev *pdev)
 
 		cxgb4_free_mps_ref_entries(adapter);
 
-		for_each_port(adapter, i)
-			if (adapter->port[i]->reg_state == NETREG_REGISTERED)
-				unregister_netdev(adapter->port[i]);
-
 		debugfs_remove_recursive(adapter->debugfs_root);
 
 		if (!is_t4(adapter->params.chip))
@@ -581,6 +581,9 @@ void t4_uld_clean_up(struct adapter *adap)
 {
 	unsigned int i;
 
+	if (!is_uld(adap))
+		return;
+
 	mutex_lock(&uld_mutex);
 	for (i = 0; i < CXGB4_ULD_MAX; i++) {
 		if (!adap->uld[i].handle)
@@ -2770,32 +2770,32 @@ static int dpaa2_switch_ctrl_if_setup(struct ethsw_core *ethsw)
 	if (err)
 		return err;
 
-	err = dpaa2_switch_seed_bp(ethsw);
-	if (err)
-		goto err_free_dpbp;
-
 	err = dpaa2_switch_alloc_rings(ethsw);
 	if (err)
-		goto err_drain_dpbp;
+		goto err_free_dpbp;
 
 	err = dpaa2_switch_setup_dpio(ethsw);
 	if (err)
 		goto err_destroy_rings;
 
+	err = dpaa2_switch_seed_bp(ethsw);
+	if (err)
+		goto err_deregister_dpio;
+
 	err = dpsw_ctrl_if_enable(ethsw->mc_io, 0, ethsw->dpsw_handle);
 	if (err) {
 		dev_err(ethsw->dev, "dpsw_ctrl_if_enable err %d\n", err);
-		goto err_deregister_dpio;
+		goto err_drain_dpbp;
 	}
 
 	return 0;
 
+err_drain_dpbp:
+	dpaa2_switch_drain_bp(ethsw);
 err_deregister_dpio:
 	dpaa2_switch_free_dpio(ethsw);
 err_destroy_rings:
 	dpaa2_switch_destroy_rings(ethsw);
-err_drain_dpbp:
-	dpaa2_switch_drain_bp(ethsw);
 err_free_dpbp:
 	dpaa2_switch_free_dpbp(ethsw);
 
@@ -1349,13 +1349,16 @@ static int gve_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 
 	err = register_netdev(dev);
 	if (err)
-		goto abort_with_wq;
+		goto abort_with_gve_init;
 
 	dev_info(&pdev->dev, "GVE version %s\n", gve_version_str);
 	gve_clear_probe_in_progress(priv);
 	queue_work(priv->gve_wq, &priv->service_task);
 	return 0;
 
+abort_with_gve_init:
+	gve_teardown_priv_resources(priv);
+
 abort_with_wq:
 	destroy_workqueue(priv->gve_wq);
 
@@ -131,7 +131,7 @@
 /* buf unit size is cache_line_size, which is 64, so the shift is 6 */
 #define PPE_BUF_SIZE_SHIFT		6
 #define PPE_TX_BUF_HOLD			BIT(31)
-#define CACHE_LINE_MASK			0x3F
+#define SOC_CACHE_LINE_MASK		0x3F
 #else
 #define PPE_CFG_QOS_VMID_GRP_SHIFT	8
 #define PPE_CFG_RX_CTRL_ALIGN_SHIFT	11
@@ -531,8 +531,8 @@ hip04_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 #if defined(CONFIG_HI13X1_GMAC)
 	desc->cfg = (__force u32)cpu_to_be32(TX_CLEAR_WB | TX_FINISH_CACHE_INV
 		| TX_RELEASE_TO_PPE | priv->port << TX_POOL_SHIFT);
-	desc->data_offset = (__force u32)cpu_to_be32(phys & CACHE_LINE_MASK);
-	desc->send_addr = (__force u32)cpu_to_be32(phys & ~CACHE_LINE_MASK);
+	desc->data_offset = (__force u32)cpu_to_be32(phys & SOC_CACHE_LINE_MASK);
+	desc->send_addr = (__force u32)cpu_to_be32(phys & ~SOC_CACHE_LINE_MASK);
 #else
 	desc->cfg = (__force u32)cpu_to_be32(TX_CLEAR_WB | TX_FINISH_CACHE_INV);
 	desc->send_addr = (__force u32)cpu_to_be32(phys);
@@ -135,7 +135,8 @@ struct hclge_mbx_vf_to_pf_cmd {
 	u8 mbx_need_resp;
 	u8 rsv1[1];
 	u8 msg_len;
-	u8 rsv2[3];
+	u8 rsv2;
+	u16 match_id;
 	struct hclge_vf_to_pf_msg msg;
 };
 
@@ -145,7 +146,8 @@ struct hclge_mbx_pf_to_vf_cmd {
 	u8 dest_vfid;
 	u8 rsv[3];
 	u8 msg_len;
-	u8 rsv1[3];
+	u8 rsv1;
+	u16 match_id;
 	struct hclge_pf_to_vf_msg msg;
 };
 
@@ -47,6 +47,7 @@ static int hclge_gen_resp_to_vf(struct hclge_vport *vport,
 
 	resp_pf_to_vf->dest_vfid = vf_to_pf_req->mbx_src_vfid;
 	resp_pf_to_vf->msg_len = vf_to_pf_req->msg_len;
+	resp_pf_to_vf->match_id = vf_to_pf_req->match_id;
 
 	resp_pf_to_vf->msg.code = HCLGE_MBX_PF_VF_RESP;
 	resp_pf_to_vf->msg.vf_mbx_msg_code = vf_to_pf_req->msg.code;
@@ -2621,6 +2621,16 @@ static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev)
 
 static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev)
 {
+	struct hnae3_handle *nic = &hdev->nic;
+	int ret;
+
+	ret = hclgevf_en_hw_strip_rxvtag(nic, true);
+	if (ret) {
+		dev_err(&hdev->pdev->dev,
+			"failed to enable rx vlan offload, ret = %d\n", ret);
+		return ret;
+	}
+
 	return hclgevf_set_vlan_filter(&hdev->nic, htons(ETH_P_8021Q), 0,
 				       false);
 }
@@ -1707,7 +1707,6 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
 		tx_send_failed++;
 		tx_dropped++;
 		ret = NETDEV_TX_OK;
-		ibmvnic_tx_scrq_flush(adapter, tx_scrq);
 		goto out;
 	}
 
@@ -1729,6 +1728,7 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
 		dev_kfree_skb_any(skb);
 		tx_send_failed++;
 		tx_dropped++;
+		ibmvnic_tx_scrq_flush(adapter, tx_scrq);
 		ret = NETDEV_TX_OK;
 		goto out;
 	}
@@ -7664,6 +7664,7 @@ err_flashmap:
 err_ioremap:
 	free_netdev(netdev);
 err_alloc_etherdev:
+	pci_disable_pcie_error_reporting(pdev);
 	pci_release_mem_regions(pdev);
 err_pci_reg:
 err_dma:
@@ -2227,6 +2227,7 @@ err_sw_init:
 err_ioremap:
 	free_netdev(netdev);
 err_alloc_netdev:
+	pci_disable_pcie_error_reporting(pdev);
 	pci_release_mem_regions(pdev);
 err_pci_reg:
 err_dma:
@@ -3798,6 +3798,7 @@ static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 err_ioremap:
 	free_netdev(netdev);
 err_alloc_etherdev:
+	pci_disable_pcie_error_reporting(pdev);
 	pci_release_regions(pdev);
 err_pci_reg:
 err_dma:
@@ -931,6 +931,7 @@ static void igb_configure_msix(struct igb_adapter *adapter)
  **/
 static int igb_request_msix(struct igb_adapter *adapter)
 {
+	unsigned int num_q_vectors = adapter->num_q_vectors;
 	struct net_device *netdev = adapter->netdev;
 	int i, err = 0, vector = 0, free_vector = 0;
 
@@ -939,7 +940,13 @@ static int igb_request_msix(struct igb_adapter *adapter)
 	if (err)
 		goto err_out;
 
-	for (i = 0; i < adapter->num_q_vectors; i++) {
+	if (num_q_vectors > MAX_Q_VECTORS) {
+		num_q_vectors = MAX_Q_VECTORS;
+		dev_warn(&adapter->pdev->dev,
+			 "The number of queue vectors (%d) is higher than max allowed (%d)\n",
+			 adapter->num_q_vectors, MAX_Q_VECTORS);
+	}
+	for (i = 0; i < num_q_vectors; i++) {
 		struct igb_q_vector *q_vector = adapter->q_vector[i];
 
 		vector++;
@@ -1678,14 +1685,15 @@ static bool is_any_txtime_enabled(struct igb_adapter *adapter)
  **/
 static void igb_config_tx_modes(struct igb_adapter *adapter, int queue)
 {
-	struct igb_ring *ring = adapter->tx_ring[queue];
 	struct net_device *netdev = adapter->netdev;
 	struct e1000_hw *hw = &adapter->hw;
+	struct igb_ring *ring;
 	u32 tqavcc, tqavctrl;
 	u16 value;
 
 	WARN_ON(hw->mac.type != e1000_i210);
 	WARN_ON(queue < 0 || queue > 1);
+	ring = adapter->tx_ring[queue];
 
 	/* If any of the Qav features is enabled, configure queues as SR and
 	 * with HIGH PRIO. If none is, then configure them with LOW PRIO and
@@ -3615,6 +3623,7 @@ err_sw_init:
 err_ioremap:
 	free_netdev(netdev);
 err_alloc_etherdev:
+	pci_disable_pcie_error_reporting(pdev);
 	pci_release_mem_regions(pdev);
 err_pci_reg:
 err_dma:
@@ -4835,6 +4844,8 @@ static void igb_clean_tx_ring(struct igb_ring *tx_ring)
 				       DMA_TO_DEVICE);
 		}
 
+		tx_buffer->next_to_watch = NULL;
+
 		/* move us one more past the eop_desc for start of next pkt */
 		tx_buffer++;
 		i++;
@@ -560,7 +560,7 @@ static inline s32 igc_read_phy_reg(struct igc_hw *hw, u32 offset, u16 *data)
 	if (hw->phy.ops.read_reg)
 		return hw->phy.ops.read_reg(hw, offset, data);
 
-	return 0;
+	return -EOPNOTSUPP;
 }
 
 void igc_reinit_locked(struct igc_adapter *);
@@ -217,6 +217,8 @@ static void igc_clean_tx_ring(struct igc_ring *tx_ring)
 				       DMA_TO_DEVICE);
 		}
 
+		tx_buffer->next_to_watch = NULL;
+
 		/* move us one more past the eop_desc for start of next pkt */
 		tx_buffer++;
 		i++;
@@ -5594,6 +5596,7 @@ err_sw_init:
 err_ioremap:
 	free_netdev(netdev);
 err_alloc_etherdev:
+	pci_disable_pcie_error_reporting(pdev);
 	pci_release_mem_regions(pdev);
 err_pci_reg:
 err_dma:
@@ -1825,7 +1825,8 @@ static void ixgbe_dma_sync_frag(struct ixgbe_ring *rx_ring,
 				struct sk_buff *skb)
 {
 	if (ring_uses_build_skb(rx_ring)) {
-		unsigned long offset = (unsigned long)(skb->data) & ~PAGE_MASK;
+		unsigned long mask = (unsigned long)ixgbe_rx_pg_size(rx_ring) - 1;
+		unsigned long offset = (unsigned long)(skb->data) & mask;
 
 		dma_sync_single_range_for_cpu(rx_ring->dev,
 					      IXGBE_CB(skb)->dma,
@@ -11069,6 +11070,7 @@ err_ioremap:
 	disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state);
 	free_netdev(netdev);
 err_alloc_etherdev:
+	pci_disable_pcie_error_reporting(pdev);
 	pci_release_mem_regions(pdev);
 err_pci_reg:
 err_dma:
@@ -211,7 +211,7 @@ struct xfrm_state *ixgbevf_ipsec_find_rx_state(struct ixgbevf_ipsec *ipsec,
 static int ixgbevf_ipsec_parse_proto_keys(struct xfrm_state *xs,
 					  u32 *mykey, u32 *mysalt)
 {
-	struct net_device *dev = xs->xso.dev;
+	struct net_device *dev = xs->xso.real_dev;
 	unsigned char *key_data;
 	char *alg_name = NULL;
 	int key_len;
@@ -260,12 +260,15 @@ static int ixgbevf_ipsec_parse_proto_keys(struct xfrm_state *xs,
  **/
 static int ixgbevf_ipsec_add_sa(struct xfrm_state *xs)
 {
-	struct net_device *dev = xs->xso.dev;
-	struct ixgbevf_adapter *adapter = netdev_priv(dev);
-	struct ixgbevf_ipsec *ipsec = adapter->ipsec;
+	struct net_device *dev = xs->xso.real_dev;
+	struct ixgbevf_adapter *adapter;
+	struct ixgbevf_ipsec *ipsec;
 	u16 sa_idx;
 	int ret;
 
+	adapter = netdev_priv(dev);
+	ipsec = adapter->ipsec;
+
 	if (xs->id.proto != IPPROTO_ESP && xs->id.proto != IPPROTO_AH) {
 		netdev_err(dev, "Unsupported protocol 0x%04x for IPsec offload\n",
 			   xs->id.proto);
@@ -383,11 +386,14 @@ static int ixgbevf_ipsec_add_sa(struct xfrm_state *xs)
  **/
 static void ixgbevf_ipsec_del_sa(struct xfrm_state *xs)
 {
-	struct net_device *dev = xs->xso.dev;
-	struct ixgbevf_adapter *adapter = netdev_priv(dev);
-	struct ixgbevf_ipsec *ipsec = adapter->ipsec;
+	struct net_device *dev = xs->xso.real_dev;
+	struct ixgbevf_adapter *adapter;
+	struct ixgbevf_ipsec *ipsec;
 	u16 sa_idx;
 
+	adapter = netdev_priv(dev);
+	ipsec = adapter->ipsec;
+
 	if (xs->xso.flags & XFRM_OFFLOAD_INBOUND) {
 		sa_idx = xs->xso.offload_handle - IXGBE_IPSEC_BASE_RX_INDEX;
 
@@ -1287,6 +1287,7 @@ static int ocelot_netdevice_lag_leave(struct net_device *dev,
 }
 
 static int ocelot_netdevice_changeupper(struct net_device *dev,
+					struct net_device *brport_dev,
 					struct netdev_notifier_changeupper_info *info)
 {
 	struct netlink_ext_ack *extack;
@@ -1296,11 +1297,11 @@ static int ocelot_netdevice_changeupper(struct net_device *dev,
 
 	if (netif_is_bridge_master(info->upper_dev)) {
 		if (info->linking)
-			err = ocelot_netdevice_bridge_join(dev, dev,
+			err = ocelot_netdevice_bridge_join(dev, brport_dev,
 							   info->upper_dev,
 							   extack);
 		else
-			err = ocelot_netdevice_bridge_leave(dev, dev,
+			err = ocelot_netdevice_bridge_leave(dev, brport_dev,
 							    info->upper_dev);
 	}
 	if (netif_is_lag_master(info->upper_dev)) {
@@ -1335,7 +1336,7 @@ ocelot_netdevice_lag_changeupper(struct net_device *dev,
 		if (ocelot_port->bond != dev)
 			return NOTIFY_OK;
 
-		err = ocelot_netdevice_changeupper(lower, info);
+		err = ocelot_netdevice_changeupper(lower, dev, info);
 		if (err)
 			return notifier_from_errno(err);
 	}
@@ -1374,7 +1375,7 @@ static int ocelot_netdevice_event(struct notifier_block *unused,
 		struct netdev_notifier_changeupper_info *info = ptr;
 
 		if (ocelot_netdevice_dev_check(dev))
-			return ocelot_netdevice_changeupper(dev, info);
+			return ocelot_netdevice_changeupper(dev, dev, info);
 
 		if (netif_is_lag_master(dev))
 			return ocelot_netdevice_lag_changeupper(dev, info);
@@ -5085,7 +5085,8 @@ static int r8169_mdio_register(struct rtl8169_private *tp)
 	new_bus->priv = tp;
 	new_bus->parent = &pdev->dev;
 	new_bus->irq[0] = PHY_MAC_INTERRUPT;
-	snprintf(new_bus->id, MII_BUS_ID_SIZE, "r8169-%x", pci_dev_id(pdev));
+	snprintf(new_bus->id, MII_BUS_ID_SIZE, "r8169-%x-%x",
+		 pci_domain_nr(pdev->bus), pci_dev_id(pdev));
 
 	new_bus->read = r8169_mdio_read_reg;
 	new_bus->write = r8169_mdio_write_reg;
@@ -152,6 +152,7 @@ static int efx_allocate_msix_channels(struct efx_nic *efx,
 	 * maximum size.
 	 */
 	tx_per_ev = EFX_MAX_EVQ_SIZE / EFX_TXQ_MAX_ENT(efx);
+	tx_per_ev = min(tx_per_ev, EFX_MAX_TXQ_PER_CHANNEL);
 	n_xdp_tx = num_possible_cpus();
 	n_xdp_ev = DIV_ROUND_UP(n_xdp_tx, tx_per_ev);
 
@@ -181,7 +182,7 @@ static int efx_allocate_msix_channels(struct efx_nic *efx,
 		efx->xdp_tx_queue_count = 0;
 	} else {
 		efx->n_xdp_channels = n_xdp_ev;
-		efx->xdp_tx_per_channel = EFX_MAX_TXQ_PER_CHANNEL;
+		efx->xdp_tx_per_channel = tx_per_ev;
 		efx->xdp_tx_queue_count = n_xdp_tx;
 		n_channels += n_xdp_ev;
 		netif_dbg(efx, drv, efx->net_dev,
@@ -891,18 +892,20 @@ int efx_set_channels(struct efx_nic *efx)
 		if (efx_channel_is_xdp_tx(channel)) {
 			efx_for_each_channel_tx_queue(tx_queue, channel) {
 				tx_queue->queue = next_queue++;
-				netif_dbg(efx, drv, efx->net_dev, "Channel %u TXQ %u is XDP %u, HW %u\n",
-					  channel->channel, tx_queue->label,
-					  xdp_queue_number, tx_queue->queue);
+
 				/* We may have a few left-over XDP TX
 				 * queues owing to xdp_tx_queue_count
 				 * not dividing evenly by EFX_MAX_TXQ_PER_CHANNEL.
 				 * We still allocate and probe those
 				 * TXQs, but never use them.
 				 */
-				if (xdp_queue_number < efx->xdp_tx_queue_count)
+				if (xdp_queue_number < efx->xdp_tx_queue_count) {
+					netif_dbg(efx, drv, efx->net_dev, "Channel %u TXQ %u is XDP %u, HW %u\n",
+						  channel->channel, tx_queue->label,
+						  xdp_queue_number, tx_queue->queue);
 					efx->xdp_tx_queues[xdp_queue_number] = tx_queue;
-				xdp_queue_number++;
+					xdp_queue_number++;
+				}
 			}
 		} else {
 			efx_for_each_channel_tx_queue(tx_queue, channel) {
@@ -914,8 +917,7 @@ int efx_set_channels(struct efx_nic *efx)
 			}
 		}
 	}
-	if (xdp_queue_number)
-		efx->xdp_tx_queue_count = xdp_queue_number;
+	WARN_ON(xdp_queue_number != efx->xdp_tx_queue_count);
 
 	rc = netif_set_real_num_tx_queues(efx->net_dev, efx->n_tx_channels);
 	if (rc)
@@ -7170,6 +7170,7 @@ int stmmac_suspend(struct device *dev)
 				     priv->plat->rx_queues_to_use, false);
 
 		stmmac_fpe_handshake(priv, false);
+		stmmac_fpe_stop_wq(priv);
 	}
 
 	priv->speed = SPEED_UNKNOWN;
@@ -399,6 +399,7 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac)
 	struct device_node *np = pdev->dev.of_node;
 	struct plat_stmmacenet_data *plat;
 	struct stmmac_dma_cfg *dma_cfg;
+	int phy_mode;
 	void *ret;
 	int rc;
 
@@ -414,10 +415,11 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac)
 		eth_zero_addr(mac);
 	}
 
-	plat->phy_interface = device_get_phy_mode(&pdev->dev);
-	if (plat->phy_interface < 0)
-		return ERR_PTR(plat->phy_interface);
+	phy_mode = device_get_phy_mode(&pdev->dev);
+	if (phy_mode < 0)
+		return ERR_PTR(phy_mode);
 
+	plat->phy_interface = phy_mode;
 	plat->interface = stmmac_of_get_mac_mode(np);
 	if (plat->interface < 0)
 		plat->interface = plat->phy_interface;
@@ -78,6 +78,11 @@ enum {
 	/* Temperature read register (88E2110 only) */
 	MV_PCS_TEMP		= 0x8042,
 
+	/* Number of ports on the device */
+	MV_PCS_PORT_INFO	= 0xd00d,
+	MV_PCS_PORT_INFO_NPORTS_MASK	= 0x0380,
+	MV_PCS_PORT_INFO_NPORTS_SHIFT	= 7,
+
 	/* These registers appear at 0x800X and 0xa00X - the 0xa00X control
 	 * registers appear to set themselves to the 0x800X when AN is
 	 * restarted, but status registers appear readable from either.
@@ -966,6 +971,30 @@ static const struct mv3310_chip mv2111_type = {
 #endif
 };
 
+static int mv3310_get_number_of_ports(struct phy_device *phydev)
+{
+	int ret;
+
+	ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MV_PCS_PORT_INFO);
+	if (ret < 0)
+		return ret;
+
+	ret &= MV_PCS_PORT_INFO_NPORTS_MASK;
+	ret >>= MV_PCS_PORT_INFO_NPORTS_SHIFT;
+
+	return ret + 1;
+}
+
+static int mv3310_match_phy_device(struct phy_device *phydev)
+{
+	return mv3310_get_number_of_ports(phydev) == 1;
+}
+
+static int mv3340_match_phy_device(struct phy_device *phydev)
+{
+	return mv3310_get_number_of_ports(phydev) == 4;
+}
+
 static int mv211x_match_phy_device(struct phy_device *phydev, bool has_5g)
 {
 	int val;
@@ -994,7 +1023,8 @@ static int mv2111_match_phy_device(struct phy_device *phydev)
 static struct phy_driver mv3310_drivers[] = {
 	{
 		.phy_id		= MARVELL_PHY_ID_88X3310,
-		.phy_id_mask	= MARVELL_PHY_ID_88X33X0_MASK,
+		.phy_id_mask	= MARVELL_PHY_ID_MASK,
+		.match_phy_device = mv3310_match_phy_device,
 		.name		= "mv88x3310",
 		.driver_data	= &mv3310_type,
 		.get_features	= mv3310_get_features,
@@ -1011,8 +1041,9 @@ static struct phy_driver mv3310_drivers[] = {
 		.set_loopback	= genphy_c45_loopback,
 	},
 	{
-		.phy_id		= MARVELL_PHY_ID_88X3340,
-		.phy_id_mask	= MARVELL_PHY_ID_88X33X0_MASK,
+		.phy_id		= MARVELL_PHY_ID_88X3310,
+		.phy_id_mask	= MARVELL_PHY_ID_MASK,
+		.match_phy_device = mv3340_match_phy_device,
 		.name		= "mv88x3340",
 		.driver_data	= &mv3340_type,
 		.get_features	= mv3310_get_features,
@@ -1069,8 +1100,7 @@ static struct phy_driver mv3310_drivers[] = {
 module_phy_driver(mv3310_drivers);
 
 static struct mdio_device_id __maybe_unused mv3310_tbl[] = {
-	{ MARVELL_PHY_ID_88X3310, MARVELL_PHY_ID_88X33X0_MASK },
-	{ MARVELL_PHY_ID_88X3340, MARVELL_PHY_ID_88X33X0_MASK },
+	{ MARVELL_PHY_ID_88X3310, MARVELL_PHY_ID_MASK },
 	{ MARVELL_PHY_ID_88E2110, MARVELL_PHY_ID_MASK },
 	{ },
 };
@@ -2496,7 +2496,7 @@ static struct hso_device *hso_create_net_device(struct usb_interface *interface,
 			   hso_net_init);
 	if (!net) {
 		dev_err(&interface->dev, "Unable to create ethernet device\n");
-		goto exit;
+		goto err_hso_dev;
 	}
 
 	hso_net = netdev_priv(net);
@@ -2509,13 +2509,13 @@ static struct hso_device *hso_create_net_device(struct usb_interface *interface,
 				      USB_DIR_IN);
 	if (!hso_net->in_endp) {
 		dev_err(&interface->dev, "Can't find BULK IN endpoint\n");
-		goto exit;
+		goto err_net;
 	}
 	hso_net->out_endp = hso_get_ep(interface, USB_ENDPOINT_XFER_BULK,
 				       USB_DIR_OUT);
 	if (!hso_net->out_endp) {
 		dev_err(&interface->dev, "Can't find BULK OUT endpoint\n");
-		goto exit;
+		goto err_net;
 	}
 	SET_NETDEV_DEV(net, &interface->dev);
 	SET_NETDEV_DEVTYPE(net, &hso_type);
@@ -2524,18 +2524,18 @@ static struct hso_device *hso_create_net_device(struct usb_interface *interface,
 	for (i = 0; i < MUX_BULK_RX_BUF_COUNT; i++) {
 		hso_net->mux_bulk_rx_urb_pool[i] = usb_alloc_urb(0, GFP_KERNEL);
 		if (!hso_net->mux_bulk_rx_urb_pool[i])
-			goto exit;
+			goto err_mux_bulk_rx;
 		hso_net->mux_bulk_rx_buf_pool[i] = kzalloc(MUX_BULK_RX_BUF_SIZE,
 							   GFP_KERNEL);
 		if (!hso_net->mux_bulk_rx_buf_pool[i])
-			goto exit;
+			goto err_mux_bulk_rx;
 	}
 	hso_net->mux_bulk_tx_urb = usb_alloc_urb(0, GFP_KERNEL);
 	if (!hso_net->mux_bulk_tx_urb)
-		goto exit;
+		goto err_mux_bulk_rx;
 	hso_net->mux_bulk_tx_buf = kzalloc(MUX_BULK_TX_BUF_SIZE, GFP_KERNEL);
 	if (!hso_net->mux_bulk_tx_buf)
-		goto exit;
+		goto err_free_tx_urb;
 
 	add_net_device(hso_dev);
 
@@ -2543,7 +2543,7 @@ static struct hso_device *hso_create_net_device(struct usb_interface *interface,
 	result = register_netdev(net);
 	if (result) {
 		dev_err(&interface->dev, "Failed to register device\n");
-		goto exit;
+		goto err_free_tx_buf;
 	}
 
 	hso_log_port(hso_dev);
@@ -2551,8 +2551,21 @@ static struct hso_device *hso_create_net_device(struct usb_interface *interface,
 		hso_create_rfkill(hso_dev, interface);
 
 	return hso_dev;
-exit:
-	hso_free_net_device(hso_dev, true);
+
+err_free_tx_buf:
+	remove_net_device(hso_dev);
+	kfree(hso_net->mux_bulk_tx_buf);
+err_free_tx_urb:
+	usb_free_urb(hso_net->mux_bulk_tx_urb);
+err_mux_bulk_rx:
+	for (i = 0; i < MUX_BULK_RX_BUF_COUNT; i++) {
+		usb_free_urb(hso_net->mux_bulk_rx_urb_pool[i]);
+		kfree(hso_net->mux_bulk_rx_buf_pool[i]);
+	}
err_net:
+	free_netdev(net);
+err_hso_dev:
+	kfree(hso_dev);
 	return NULL;
 }
 
@@ -880,7 +880,10 @@ static inline blk_status_t nvme_setup_write_zeroes(struct nvme_ns *ns,
 		cpu_to_le64(nvme_sect_to_lba(ns, blk_rq_pos(req)));
 	cmnd->write_zeroes.length =
 		cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);
-	cmnd->write_zeroes.control = 0;
+	if (nvme_ns_has_pi(ns))
+		cmnd->write_zeroes.control = cpu_to_le16(NVME_RW_PRINFO_PRACT);
+	else
+		cmnd->write_zeroes.control = 0;
 	return BLK_STS_OK;
 }
 
@@ -2591,7 +2591,9 @@ static void nvme_reset_work(struct work_struct *work)
 	bool was_suspend = !!(dev->ctrl.ctrl_config & NVME_CC_SHN_NORMAL);
 	int result;
 
-	if (WARN_ON(dev->ctrl.state != NVME_CTRL_RESETTING)) {
+	if (dev->ctrl.state != NVME_CTRL_RESETTING) {
+		dev_warn(dev->ctrl.device, "ctrl state %d is not RESETTING\n",
+			 dev->ctrl.state);
 		result = -ENODEV;
 		goto out;
 	}
@@ -2998,7 +3000,6 @@ static void nvme_remove(struct pci_dev *pdev)
 	if (!pci_device_is_present(pdev)) {
 		nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DEAD);
 		nvme_dev_disable(dev, true);
-		nvme_dev_remove_admin(dev);
 	}
 
 	flush_work(&dev->ctrl.reset_work);
@@ -183,13 +183,10 @@ static int sprd_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
 			}
 		}
 
-		if (state->period != cstate->period ||
-		    state->duty_cycle != cstate->duty_cycle) {
-			ret = sprd_pwm_config(spc, pwm, state->duty_cycle,
-					      state->period);
-			if (ret)
-				return ret;
-		}
+		ret = sprd_pwm_config(spc, pwm, state->duty_cycle,
+				      state->period);
+		if (ret)
+			return ret;
 
 		sprd_pwm_write(spc, pwm->hwpwm, SPRD_PWM_ENABLE, 1);
 	} else if (cstate->enabled) {
@@ -366,9 +366,8 @@ static struct hi6421_regulator_info
 
 static int hi6421_regulator_enable(struct regulator_dev *rdev)
 {
-	struct hi6421_regulator_pdata *pdata;
+	struct hi6421_regulator_pdata *pdata = rdev_get_drvdata(rdev);
 
-	pdata = dev_get_drvdata(rdev->dev.parent);
 	/* hi6421 spec requires regulator enablement must be serialized:
 	 *  - Because when BUCK, LDO switching from off to on, it will have
 	 *    a huge instantaneous current; so you can not turn on two or
@@ -385,9 +384,10 @@ static int hi6421_regulator_enable(struct regulator_dev *rdev)
 
 static unsigned int hi6421_regulator_ldo_get_mode(struct regulator_dev *rdev)
 {
-	struct hi6421_regulator_info *info = rdev_get_drvdata(rdev);
-	u32 reg_val;
+	struct hi6421_regulator_info *info;
+	unsigned int reg_val;
 
+	info = container_of(rdev->desc, struct hi6421_regulator_info, desc);
 	regmap_read(rdev->regmap, rdev->desc->enable_reg, &reg_val);
 	if (reg_val & info->mode_mask)
 		return REGULATOR_MODE_IDLE;
@@ -397,9 +397,10 @@ static unsigned int hi6421_regulator_ldo_get_mode(struct regulator_dev *rdev)
 
 static unsigned int hi6421_regulator_buck_get_mode(struct regulator_dev *rdev)
 {
-	struct hi6421_regulator_info *info = rdev_get_drvdata(rdev);
-	u32 reg_val;
+	struct hi6421_regulator_info *info;
+	unsigned int reg_val;
 
+	info = container_of(rdev->desc, struct hi6421_regulator_info, desc);
 	regmap_read(rdev->regmap, rdev->desc->enable_reg, &reg_val);
 	if (reg_val & info->mode_mask)
 		return REGULATOR_MODE_STANDBY;
@@ -410,9 +411,10 @@ static unsigned int hi6421_regulator_buck_get_mode(struct regulator_dev *rdev)
 static int hi6421_regulator_ldo_set_mode(struct regulator_dev *rdev,
 					 unsigned int mode)
 {
-	struct hi6421_regulator_info *info = rdev_get_drvdata(rdev);
-	u32 new_mode;
+	struct hi6421_regulator_info *info;
+	unsigned int new_mode;
 
+	info = container_of(rdev->desc, struct hi6421_regulator_info, desc);
 	switch (mode) {
 	case REGULATOR_MODE_NORMAL:
 		new_mode = 0;
@@ -434,9 +436,10 @@ static int hi6421_regulator_ldo_set_mode(struct regulator_dev *rdev,
 static int hi6421_regulator_buck_set_mode(struct regulator_dev *rdev,
 					  unsigned int mode)
 {
-	struct hi6421_regulator_info *info = rdev_get_drvdata(rdev);
-	u32 new_mode;
+	struct hi6421_regulator_info *info;
+	unsigned int new_mode;
 
+	info = container_of(rdev->desc, struct hi6421_regulator_info, desc);
 	switch (mode) {
 	case REGULATOR_MODE_NORMAL:
 		new_mode = 0;
@@ -459,7 +462,9 @@ static unsigned int
 hi6421_regulator_ldo_get_optimum_mode(struct regulator_dev *rdev,
 				      int input_uV, int output_uV, int load_uA)
 {
-	struct hi6421_regulator_info *info = rdev_get_drvdata(rdev);
+	struct hi6421_regulator_info *info;
+
+	info = container_of(rdev->desc, struct hi6421_regulator_info, desc);
 
 	if (load_uA > info->eco_microamp)
 		return REGULATOR_MODE_NORMAL;
@@ -543,14 +548,13 @@ static int hi6421_regulator_probe(struct platform_device *pdev)
 	if (!pdata)
 		return -ENOMEM;
 	mutex_init(&pdata->lock);
 	platform_set_drvdata(pdev, pdata);
 
 	for (i = 0; i < ARRAY_SIZE(hi6421_regulator_info); i++) {
-		/* assign per-regulator data */
 		info = &hi6421_regulator_info[i];
 
 		config.dev = pdev->dev.parent;
-		config.driver_data = info;
+		config.driver_data = pdata;
 		config.regmap = pmic->regmap;
 
 		rdev = devm_regulator_register(&pdev->dev, &info->desc,
@@ -439,39 +439,10 @@ static umode_t iscsi_iface_attr_is_visible(struct kobject *kobj,
 	struct device *dev = container_of(kobj, struct device, kobj);
 	struct iscsi_iface *iface = iscsi_dev_to_iface(dev);
 	struct iscsi_transport *t = iface->transport;
-	int param;
-	int param_type;
+	int param = -1;
 
 	if (attr == &dev_attr_iface_enabled.attr)
 		param = ISCSI_NET_PARAM_IFACE_ENABLE;
-	else if (attr == &dev_attr_iface_vlan_id.attr)
-		param = ISCSI_NET_PARAM_VLAN_ID;
-	else if (attr == &dev_attr_iface_vlan_priority.attr)
-		param = ISCSI_NET_PARAM_VLAN_PRIORITY;
-	else if (attr == &dev_attr_iface_vlan_enabled.attr)
-		param = ISCSI_NET_PARAM_VLAN_ENABLED;
-	else if (attr == &dev_attr_iface_mtu.attr)
-		param = ISCSI_NET_PARAM_MTU;
-	else if (attr == &dev_attr_iface_port.attr)
-		param = ISCSI_NET_PARAM_PORT;
-	else if (attr == &dev_attr_iface_ipaddress_state.attr)
-		param = ISCSI_NET_PARAM_IPADDR_STATE;
-	else if (attr == &dev_attr_iface_delayed_ack_en.attr)
-		param = ISCSI_NET_PARAM_DELAYED_ACK_EN;
-	else if (attr == &dev_attr_iface_tcp_nagle_disable.attr)
-		param = ISCSI_NET_PARAM_TCP_NAGLE_DISABLE;
-	else if (attr == &dev_attr_iface_tcp_wsf_disable.attr)
-		param = ISCSI_NET_PARAM_TCP_WSF_DISABLE;
-	else if (attr == &dev_attr_iface_tcp_wsf.attr)
-		param = ISCSI_NET_PARAM_TCP_WSF;
-	else if (attr == &dev_attr_iface_tcp_timer_scale.attr)
-		param = ISCSI_NET_PARAM_TCP_TIMER_SCALE;
-	else if (attr == &dev_attr_iface_tcp_timestamp_en.attr)
-		param = ISCSI_NET_PARAM_TCP_TIMESTAMP_EN;
-	else if (attr == &dev_attr_iface_cache_id.attr)
-		param = ISCSI_NET_PARAM_CACHE_ID;
-	else if (attr == &dev_attr_iface_redirect_en.attr)
-		param = ISCSI_NET_PARAM_REDIRECT_EN;
 	else if (attr == &dev_attr_iface_def_taskmgmt_tmo.attr)
 		param = ISCSI_IFACE_PARAM_DEF_TASKMGMT_TMO;
 	else if (attr == &dev_attr_iface_header_digest.attr)
@@ -508,6 +479,38 @@ static umode_t iscsi_iface_attr_is_visible(struct kobject *kobj,
 		param = ISCSI_IFACE_PARAM_STRICT_LOGIN_COMP_EN;
 	else if (attr == &dev_attr_iface_initiator_name.attr)
 		param = ISCSI_IFACE_PARAM_INITIATOR_NAME;
+
+	if (param != -1)
+		return t->attr_is_visible(ISCSI_IFACE_PARAM, param);
+
+	if (attr == &dev_attr_iface_vlan_id.attr)
+		param = ISCSI_NET_PARAM_VLAN_ID;
+	else if (attr == &dev_attr_iface_vlan_priority.attr)
+		param = ISCSI_NET_PARAM_VLAN_PRIORITY;
+	else if (attr == &dev_attr_iface_vlan_enabled.attr)
+		param = ISCSI_NET_PARAM_VLAN_ENABLED;
+	else if (attr == &dev_attr_iface_mtu.attr)
+		param = ISCSI_NET_PARAM_MTU;
+	else if (attr == &dev_attr_iface_port.attr)
+		param = ISCSI_NET_PARAM_PORT;
+	else if (attr == &dev_attr_iface_ipaddress_state.attr)
+		param = ISCSI_NET_PARAM_IPADDR_STATE;
+	else if (attr == &dev_attr_iface_delayed_ack_en.attr)
+		param = ISCSI_NET_PARAM_DELAYED_ACK_EN;
+	else if (attr == &dev_attr_iface_tcp_nagle_disable.attr)
+		param = ISCSI_NET_PARAM_TCP_NAGLE_DISABLE;
+	else if (attr == &dev_attr_iface_tcp_wsf_disable.attr)
+		param = ISCSI_NET_PARAM_TCP_WSF_DISABLE;
+	else if (attr == &dev_attr_iface_tcp_wsf.attr)
+		param = ISCSI_NET_PARAM_TCP_WSF;
+	else if (attr == &dev_attr_iface_tcp_timer_scale.attr)
+		param = ISCSI_NET_PARAM_TCP_TIMER_SCALE;
+	else if (attr == &dev_attr_iface_tcp_timestamp_en.attr)
+		param = ISCSI_NET_PARAM_TCP_TIMESTAMP_EN;
+	else if (attr == &dev_attr_iface_cache_id.attr)
+		param = ISCSI_NET_PARAM_CACHE_ID;
+	else if (attr == &dev_attr_iface_redirect_en.attr)
+		param = ISCSI_NET_PARAM_REDIRECT_EN;
 	else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) {
 		if (attr == &dev_attr_ipv4_iface_ipaddress.attr)
 			param = ISCSI_NET_PARAM_IPV4_ADDR;
@@ -598,32 +601,7 @@ static umode_t iscsi_iface_attr_is_visible(struct kobject *kobj,
 		return 0;
 	}
 
-	switch (param) {
-	case ISCSI_IFACE_PARAM_DEF_TASKMGMT_TMO:
-	case ISCSI_IFACE_PARAM_HDRDGST_EN:
-	case ISCSI_IFACE_PARAM_DATADGST_EN:
-	case ISCSI_IFACE_PARAM_IMM_DATA_EN:
-	case ISCSI_IFACE_PARAM_INITIAL_R2T_EN:
-	case ISCSI_IFACE_PARAM_DATASEQ_INORDER_EN:
-	case ISCSI_IFACE_PARAM_PDU_INORDER_EN:
-	case ISCSI_IFACE_PARAM_ERL:
-	case ISCSI_IFACE_PARAM_MAX_RECV_DLENGTH:
-	case ISCSI_IFACE_PARAM_FIRST_BURST:
-	case ISCSI_IFACE_PARAM_MAX_R2T:
-	case ISCSI_IFACE_PARAM_MAX_BURST:
-	case ISCSI_IFACE_PARAM_CHAP_AUTH_EN:
-	case ISCSI_IFACE_PARAM_BIDI_CHAP_EN:
-	case ISCSI_IFACE_PARAM_DISCOVERY_AUTH_OPTIONAL:
-	case ISCSI_IFACE_PARAM_DISCOVERY_LOGOUT_EN:
-	case ISCSI_IFACE_PARAM_STRICT_LOGIN_COMP_EN:
-	case ISCSI_IFACE_PARAM_INITIATOR_NAME:
-		param_type = ISCSI_IFACE_PARAM;
-		break;
-	default:
-		param_type = ISCSI_NET_PARAM;
-	}
-
-	return t->attr_is_visible(param_type, param);
+	return t->attr_is_visible(ISCSI_NET_PARAM, param);
 }
 
 static struct attribute *iscsi_iface_attrs[] = {
@@ -84,6 +84,7 @@ MODULE_PARM_DESC(polling_limit_us,
 * struct bcm2835_spi - BCM2835 SPI controller
 * @regs: base address of register map
 * @clk: core clock, divided to calculate serial clock
+ * @clk_hz: core clock cached speed
 * @irq: interrupt, signals TX FIFO empty or RX FIFO ¾ full
 * @tfr: SPI transfer currently processed
 * @ctlr: SPI controller reverse lookup
@@ -124,6 +125,7 @@ MODULE_PARM_DESC(polling_limit_us,
 struct bcm2835_spi {
 	void __iomem *regs;
 	struct clk *clk;
+	unsigned long clk_hz;
 	int irq;
 	struct spi_transfer *tfr;
 	struct spi_controller *ctlr;
@@ -1082,19 +1084,18 @@ static int bcm2835_spi_transfer_one(struct spi_controller *ctlr,
 				    struct spi_transfer *tfr)
 {
 	struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
-	unsigned long spi_hz, clk_hz, cdiv;
+	unsigned long spi_hz, cdiv;
 	unsigned long hz_per_byte, byte_limit;
 	u32 cs = bs->prepare_cs[spi->chip_select];
 
 	/* set clock */
 	spi_hz = tfr->speed_hz;
-	clk_hz = clk_get_rate(bs->clk);
 
-	if (spi_hz >= clk_hz / 2) {
+	if (spi_hz >= bs->clk_hz / 2) {
 		cdiv = 2; /* clk_hz/2 is the fastest we can go */
 	} else if (spi_hz) {
 		/* CDIV must be a multiple of two */
-		cdiv = DIV_ROUND_UP(clk_hz, spi_hz);
+		cdiv = DIV_ROUND_UP(bs->clk_hz, spi_hz);
 		cdiv += (cdiv % 2);
 
 		if (cdiv >= 65536)
@@ -1102,7 +1103,7 @@ static int bcm2835_spi_transfer_one(struct spi_controller *ctlr,
 	} else {
 		cdiv = 0; /* 0 is the slowest we can go */
 	}
-	tfr->effective_speed_hz = cdiv ? (clk_hz / cdiv) : (clk_hz / 65536);
+	tfr->effective_speed_hz = cdiv ? (bs->clk_hz / cdiv) : (bs->clk_hz / 65536);
 	bcm2835_wr(bs, BCM2835_SPI_CLK, cdiv);
 
 	/* handle all the 3-wire mode */
@@ -1320,6 +1321,7 @@ static int bcm2835_spi_probe(struct platform_device *pdev)
 		return bs->irq ? bs->irq : -ENODEV;
 
 	clk_prepare_enable(bs->clk);
+	bs->clk_hz = clk_get_rate(bs->clk);
 
 	err = bcm2835_dma_init(ctlr, &pdev->dev, bs);
 	if (err)
@@ -309,6 +309,9 @@ static unsigned int cqspi_calc_dummy(const struct spi_mem_op *op, bool dtr)
 {
 	unsigned int dummy_clk;
 
+	if (!op->dummy.nbytes)
+		return 0;
+
 	dummy_clk = op->dummy.nbytes * (8 / op->dummy.buswidth);
 	if (dtr)
 		dummy_clk /= 2;
@@ -517,6 +517,12 @@ static int cdns_spi_probe(struct platform_device *pdev)
 		goto clk_dis_apb;
 	}
 
+	pm_runtime_use_autosuspend(&pdev->dev);
+	pm_runtime_set_autosuspend_delay(&pdev->dev, SPI_AUTOSUSPEND_TIMEOUT);
+	pm_runtime_get_noresume(&pdev->dev);
+	pm_runtime_set_active(&pdev->dev);
+	pm_runtime_enable(&pdev->dev);
+
 	ret = of_property_read_u32(pdev->dev.of_node, "num-cs", &num_cs);
 	if (ret < 0)
 		master->num_chipselect = CDNS_SPI_DEFAULT_NUM_CS;
@@ -531,11 +537,6 @@ static int cdns_spi_probe(struct platform_device *pdev)
 	/* SPI controller initializations */
 	cdns_spi_init_hw(xspi);
 
-	pm_runtime_set_active(&pdev->dev);
-	pm_runtime_enable(&pdev->dev);
-	pm_runtime_use_autosuspend(&pdev->dev);
-	pm_runtime_set_autosuspend_delay(&pdev->dev, SPI_AUTOSUSPEND_TIMEOUT);
-
 	irq = platform_get_irq(pdev, 0);
 	if (irq <= 0) {
 		ret = -ENXIO;
@@ -566,6 +567,9 @@ static int cdns_spi_probe(struct platform_device *pdev)
 
 	master->bits_per_word_mask = SPI_BPW_MASK(8);
 
+	pm_runtime_mark_last_busy(&pdev->dev);
+	pm_runtime_put_autosuspend(&pdev->dev);
+
 	ret = spi_register_master(master);
 	if (ret) {
 		dev_err(&pdev->dev, "spi_register_master failed\n");
@@ -427,13 +427,23 @@ static int mtk_spi_fifo_transfer(struct spi_master *master,
 	mtk_spi_setup_packet(master);
 
 	cnt = xfer->len / 4;
-	iowrite32_rep(mdata->base + SPI_TX_DATA_REG, xfer->tx_buf, cnt);
+	if (xfer->tx_buf)
+		iowrite32_rep(mdata->base + SPI_TX_DATA_REG, xfer->tx_buf, cnt);
+
+	if (xfer->rx_buf)
+		ioread32_rep(mdata->base + SPI_RX_DATA_REG, xfer->rx_buf, cnt);
 
 	remainder = xfer->len % 4;
 	if (remainder > 0) {
 		reg_val = 0;
-		memcpy(&reg_val, xfer->tx_buf + (cnt * 4), remainder);
-		writel(reg_val, mdata->base + SPI_TX_DATA_REG);
+		if (xfer->tx_buf) {
+			memcpy(&reg_val, xfer->tx_buf + (cnt * 4), remainder);
+			writel(reg_val, mdata->base + SPI_TX_DATA_REG);
+		}
+		if (xfer->rx_buf) {
+			reg_val = readl(mdata->base + SPI_RX_DATA_REG);
+			memcpy(xfer->rx_buf + (cnt * 4), &reg_val, remainder);
+		}
 	}
 
 	mtk_spi_enable_transfer(master);
@@ -1925,6 +1925,7 @@ static int stm32_spi_probe(struct platform_device *pdev)
 	master->can_dma = stm32_spi_can_dma;
 
 	pm_runtime_set_active(&pdev->dev);
+	pm_runtime_get_noresume(&pdev->dev);
 	pm_runtime_enable(&pdev->dev);
 
 	ret = spi_register_master(master);
@@ -1940,6 +1941,8 @@ static int stm32_spi_probe(struct platform_device *pdev)
 
 err_pm_disable:
 	pm_runtime_disable(&pdev->dev);
+	pm_runtime_put_noidle(&pdev->dev);
+	pm_runtime_set_suspended(&pdev->dev);
 err_dma_release:
 	if (spi->dma_tx)
 		dma_release_channel(spi->dma_tx);
@@ -1956,9 +1959,14 @@ static int stm32_spi_remove(struct platform_device *pdev)
 	struct spi_master *master = platform_get_drvdata(pdev);
 	struct stm32_spi *spi = spi_master_get_devdata(master);
 
+	pm_runtime_get_sync(&pdev->dev);
+
 	spi_unregister_master(master);
 	spi->cfg->disable(spi);
 
+	pm_runtime_disable(&pdev->dev);
+	pm_runtime_put_noidle(&pdev->dev);
+	pm_runtime_set_suspended(&pdev->dev);
 	if (master->dma_tx)
 		dma_release_channel(master->dma_tx);
 	if (master->dma_rx)
@@ -1966,7 +1974,6 @@ static int stm32_spi_remove(struct platform_device *pdev)
 
 	clk_disable_unprepare(spi->clk);
 
-	pm_runtime_disable(&pdev->dev);
 
 	pinctrl_pm_select_sleep_state(&pdev->dev);
 
@@ -25,7 +25,7 @@
 #include "target_core_alua.h"
 
 static sense_reason_t
-sbc_check_prot(struct se_device *, struct se_cmd *, unsigned char *, u32, bool);
+sbc_check_prot(struct se_device *, struct se_cmd *, unsigned char, u32, bool);
 static sense_reason_t sbc_execute_unmap(struct se_cmd *cmd);
 
 static sense_reason_t
@@ -279,14 +279,14 @@ static inline unsigned long long transport_lba_64_ext(unsigned char *cdb)
 }
 
 static sense_reason_t
-sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *ops)
+sbc_setup_write_same(struct se_cmd *cmd, unsigned char flags, struct sbc_ops *ops)
 {
 	struct se_device *dev = cmd->se_dev;
 	sector_t end_lba = dev->transport->get_blocks(dev) + 1;
 	unsigned int sectors = sbc_get_write_same_sectors(cmd);
 	sense_reason_t ret;
 
-	if ((flags[0] & 0x04) || (flags[0] & 0x02)) {
+	if ((flags & 0x04) || (flags & 0x02)) {
 		pr_err("WRITE_SAME PBDATA and LBDATA"
 			" bits not supported for Block Discard"
 			" Emulation\n");
@@ -308,7 +308,7 @@ sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *ops)
 	}
 
 	/* We always have ANC_SUP == 0 so setting ANCHOR is always an error */
-	if (flags[0] & 0x10) {
+	if (flags & 0x10) {
 		pr_warn("WRITE SAME with ANCHOR not supported\n");
 		return TCM_INVALID_CDB_FIELD;
 	}
@@ -316,7 +316,7 @@ sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *ops)
 	 * Special case for WRITE_SAME w/ UNMAP=1 that ends up getting
 	 * translated into block discard requests within backend code.
 	 */
-	if (flags[0] & 0x08) {
+	if (flags & 0x08) {
 		if (!ops->execute_unmap)
 			return TCM_UNSUPPORTED_SCSI_OPCODE;
 
@@ -331,7 +331,7 @@ sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *ops)
 	if (!ops->execute_write_same)
 		return TCM_UNSUPPORTED_SCSI_OPCODE;
 
-	ret = sbc_check_prot(dev, cmd, &cmd->t_task_cdb[0], sectors, true);
+	ret = sbc_check_prot(dev, cmd, flags >> 5, sectors, true);
 	if (ret)
 		return ret;
 
@@ -717,10 +717,9 @@ sbc_set_prot_op_checks(u8 protect, bool fabric_prot, enum target_prot_type prot_
 }
 
 static sense_reason_t
-sbc_check_prot(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb,
+sbc_check_prot(struct se_device *dev, struct se_cmd *cmd, unsigned char protect,
 	       u32 sectors, bool is_write)
 {
-	u8 protect = cdb[1] >> 5;
 	int sp_ops = cmd->se_sess->sup_prot_ops;
 	int pi_prot_type = dev->dev_attrib.pi_prot_type;
 	bool fabric_prot = false;
@@ -768,7 +767,7 @@ sbc_check_prot(struct se_device *dev, struct se_cmd *cmd, unsigned char protect,
 		fallthrough;
 	default:
 		pr_err("Unable to determine pi_prot_type for CDB: 0x%02x "
-		       "PROTECT: 0x%02x\n", cdb[0], protect);
+		       "PROTECT: 0x%02x\n", cmd->t_task_cdb[0], protect);
 		return TCM_INVALID_CDB_FIELD;
 	}
 
@@ -843,7 +842,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
 		if (sbc_check_dpofua(dev, cmd, cdb))
 			return TCM_INVALID_CDB_FIELD;
 
-		ret = sbc_check_prot(dev, cmd, cdb, sectors, false);
+		ret = sbc_check_prot(dev, cmd, cdb[1] >> 5, sectors, false);
 		if (ret)
 			return ret;
 
@@ -857,7 +856,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
 		if (sbc_check_dpofua(dev, cmd, cdb))
 			return TCM_INVALID_CDB_FIELD;
 
-		ret = sbc_check_prot(dev, cmd, cdb, sectors, false);
+		ret = sbc_check_prot(dev, cmd, cdb[1] >> 5, sectors, false);
 		if (ret)
 			return ret;
 
@@ -871,7 +870,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
 		if (sbc_check_dpofua(dev, cmd, cdb))
 			return TCM_INVALID_CDB_FIELD;
 
-		ret = sbc_check_prot(dev, cmd, cdb, sectors, false);
+		ret = sbc_check_prot(dev, cmd, cdb[1] >> 5, sectors, false);
 		if (ret)
 			return ret;
 
@@ -892,7 +891,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
 		if (sbc_check_dpofua(dev, cmd, cdb))
 			return TCM_INVALID_CDB_FIELD;
 
-		ret = sbc_check_prot(dev, cmd, cdb, sectors, true);
+		ret = sbc_check_prot(dev, cmd, cdb[1] >> 5, sectors, true);
 		if (ret)
 			return ret;
 
@@ -906,7 +905,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
 		if (sbc_check_dpofua(dev, cmd, cdb))
 			return TCM_INVALID_CDB_FIELD;
 
-		ret = sbc_check_prot(dev, cmd, cdb, sectors, true);
+		ret = sbc_check_prot(dev, cmd, cdb[1] >> 5, sectors, true);
 		if (ret)
 			return ret;
 
@@ -921,7 +920,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
 		if (sbc_check_dpofua(dev, cmd, cdb))
 			return TCM_INVALID_CDB_FIELD;
 
-		ret = sbc_check_prot(dev, cmd, cdb, sectors, true);
+		ret = sbc_check_prot(dev, cmd, cdb[1] >> 5, sectors, true);
 		if (ret)
 			return ret;
 
@@ -980,7 +979,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
 		size = sbc_get_size(cmd, 1);
 		cmd->t_task_lba = get_unaligned_be64(&cdb[12]);
 
-		ret = sbc_setup_write_same(cmd, &cdb[10], ops);
+		ret = sbc_setup_write_same(cmd, cdb[10], ops);
 		if (ret)
 			return ret;
 		break;
@@ -1079,7 +1078,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
 		size = sbc_get_size(cmd, 1);
 		cmd->t_task_lba = get_unaligned_be64(&cdb[2]);
 
-		ret = sbc_setup_write_same(cmd, &cdb[1], ops);
+		ret = sbc_setup_write_same(cmd, cdb[1], ops);
 		if (ret)
 			return ret;
 		break;
@@ -1097,7 +1096,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
 		 * Follow sbcr26 with WRITE_SAME (10) and check for the existence
 		 * of byte 1 bit 3 UNMAP instead of original reserved field
 		 */
-		ret = sbc_setup_write_same(cmd, &cdb[1], ops);
+		ret = sbc_setup_write_same(cmd, cdb[1], ops);
 		if (ret)
 			return ret;
 		break;
@@ -886,7 +886,7 @@ void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)
 	INIT_WORK(&cmd->work, success ? target_complete_ok_work :
 		  target_complete_failure_work);
 
-	if (wwn->cmd_compl_affinity == SE_COMPL_AFFINITY_CPUID)
+	if (!wwn || wwn->cmd_compl_affinity == SE_COMPL_AFFINITY_CPUID)
 		cpu = cmd->cpuid;
 	else
 		cpu = wwn->cmd_compl_affinity;
@ -48,6 +48,7 @@
|
|||
|
||||
#define USB_TP_TRANSMISSION_DELAY 40 /* ns */
|
||||
#define USB_TP_TRANSMISSION_DELAY_MAX 65535 /* ns */
|
||||
#define USB_PING_RESPONSE_TIME 400 /* ns */
|
||||
|
||||
/* Protect struct usb_device->state and ->children members
|
||||
* Note: Both are also protected by ->dev.sem, except that ->state can
|
||||
|
@ -182,8 +183,9 @@ int usb_device_supports_lpm(struct usb_device *udev)
|
|||
}
|
||||
|
||||
/*
|
||||
* Set the Maximum Exit Latency (MEL) for the host to initiate a transition from
|
||||
* either U1 or U2.
|
||||
* Set the Maximum Exit Latency (MEL) for the host to wakup up the path from
|
||||
* U1/U2, send a PING to the device and receive a PING_RESPONSE.
|
||||
* See USB 3.1 section C.1.5.2
|
||||
*/
|
||||
static void usb_set_lpm_mel(struct usb_device *udev,
|
||||
struct usb3_lpm_parameters *udev_lpm_params,
|
||||
|
@ -193,35 +195,37 @@ static void usb_set_lpm_mel(struct usb_device *udev,
|
|||
unsigned int hub_exit_latency)
|
||||
{
|
||||
unsigned int total_mel;
|
||||
unsigned int device_mel;
|
||||
unsigned int hub_mel;
|
||||
|
||||
/*
|
||||
* Calculate the time it takes to transition all links from the roothub
|
||||
* to the parent hub into U0. The parent hub must then decode the
|
||||
* packet (hub header decode latency) to figure out which port it was
|
||||
* bound for.
|
||||
*
|
||||
* The Hub Header decode latency is expressed in 0.1us intervals (0x1
|
||||
* means 0.1us). Multiply that by 100 to get nanoseconds.
|
||||
* tMEL1. time to transition path from host to device into U0.
|
||||
* MEL for parent already contains the delay up to parent, so only add
|
||||
* the exit latency for the last link (pick the slower exit latency),
|
||||
* and the hub header decode latency. See USB 3.1 section C 2.2.1
|
||||
* Store MEL in nanoseconds
|
||||
*/
|
||||
total_mel = hub_lpm_params->mel +
|
||||
(hub->descriptor->u.ss.bHubHdrDecLat * 100);
|
||||
max(udev_exit_latency, hub_exit_latency) * 1000 +
|
||||
hub->descriptor->u.ss.bHubHdrDecLat * 100;
|
||||
|
||||
/*
|
||||
* How long will it take to transition the downstream hub's port into
|
||||
* U0? The greater of either the hub exit latency or the device exit
|
||||
* latency.
|
||||
*
|
||||
* The BOS U1/U2 exit latencies are expressed in 1us intervals.
|
||||
* Multiply that by 1000 to get nanoseconds.
|
||||
* tMEL2. Time to submit PING packet. Sum of tTPTransmissionDelay for
|
||||
* each link + wHubDelay for each hub. Add only for last link.
|
||||
* tMEL4, the time for PING_RESPONSE to traverse upstream is similar.
|
||||
* Multiply by 2 to include it as well.
|
||||
*/
|
||||
device_mel = udev_exit_latency * 1000;
|
||||
hub_mel = hub_exit_latency * 1000;
|
||||
if (device_mel > hub_mel)
|
||||
total_mel += device_mel;
|
||||
else
|
||||
total_mel += hub_mel;
|
||||
total_mel += (__le16_to_cpu(hub->descriptor->u.ss.wHubDelay) +
|
||||
USB_TP_TRANSMISSION_DELAY) * 2;
|
||||
|
||||
/*
|
||||
* tMEL3, tPingResponse. Time taken by device to generate PING_RESPONSE
|
||||
* after receiving PING. Also add 2100ns as stated in USB 3.1 C 1.5.2.4
|
||||
* to cover the delay if the PING_RESPONSE is queued behind a Max Packet
|
||||
* Size DP.
|
||||
* Note these delays should be added only once for the entire path, so
|
||||
* add them to the MEL of the device connected to the roothub.
|
||||
*/
|
||||
if (!hub->hdev->parent)
|
||||
total_mel += USB_PING_RESPONSE_TIME + 2100;
|
||||
|
||||
udev_lpm_params->mel = total_mel;
|
||||
}
|
||||
|
@@ -4090,6 +4094,47 @@ static int usb_set_lpm_timeout(struct usb_device *udev,
 	return 0;
 }

+/*
+ * Don't allow device initiated U1/U2 if the system exit latency + one bus
+ * interval is greater than the minimum service interval of any active
+ * periodic endpoint. See USB 3.2 section 9.4.9
+ */
+static bool usb_device_may_initiate_lpm(struct usb_device *udev,
+					enum usb3_link_state state)
+{
+	unsigned int sel;		/* us */
+	int i, j;
+
+	if (state == USB3_LPM_U1)
+		sel = DIV_ROUND_UP(udev->u1_params.sel, 1000);
+	else if (state == USB3_LPM_U2)
+		sel = DIV_ROUND_UP(udev->u2_params.sel, 1000);
+	else
+		return false;
+
+	for (i = 0; i < udev->actconfig->desc.bNumInterfaces; i++) {
+		struct usb_interface *intf;
+		struct usb_endpoint_descriptor *desc;
+		unsigned int interval;
+
+		intf = udev->actconfig->interface[i];
+		if (!intf)
+			continue;
+
+		for (j = 0; j < intf->cur_altsetting->desc.bNumEndpoints; j++) {
+			desc = &intf->cur_altsetting->endpoint[j].desc;
+
+			if (usb_endpoint_xfer_int(desc) ||
+			    usb_endpoint_xfer_isoc(desc)) {
+				interval = (1 << (desc->bInterval - 1)) * 125;
+				if (sel + 125 > interval)
+					return false;
+			}
+		}
+	}
+	return true;
+}
+
 /*
  * Enable the hub-initiated U1/U2 idle timeouts, and enable device-initiated
  * U1/U2 entry.
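A worked example of the check above, with invented numbers: an interrupt endpoint with bInterval = 4 is serviced every (1 << 3) * 125 = 1000 us, so a system exit latency of 900 us fails the sel + 125 > interval test and device-initiated LPM stays off, while 500 us would pass. A minimal sketch:

#include <stdio.h>

int main(void)
{
	unsigned int bInterval = 4;	/* hypothetical periodic endpoint */
	unsigned int interval = (1 << (bInterval - 1)) * 125;	/* us */
	unsigned int sel = 900;		/* us, invented U1/U2 exit latency */

	/* One SuperSpeed bus interval is 125 us, hence the "+ 125". */
	printf("interval=%u us, sel=%u us -> %s\n", interval, sel,
	       sel + 125 > interval ? "deny device-initiated LPM"
				    : "allow device-initiated LPM");
	return 0;
}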
@@ -4162,20 +4207,23 @@ static void usb_enable_link_state(struct usb_hcd *hcd, struct usb_device *udev,
 	 * U1/U2_ENABLE
 	 */
 	if (udev->actconfig &&
-	    usb_set_device_initiated_lpm(udev, state, true) == 0) {
-		if (state == USB3_LPM_U1)
-			udev->usb3_lpm_u1_enabled = 1;
-		else if (state == USB3_LPM_U2)
-			udev->usb3_lpm_u2_enabled = 1;
-	} else {
-		/* Don't request U1/U2 entry if the device
-		 * cannot transition to U1/U2.
-		 */
-		usb_set_lpm_timeout(udev, state, 0);
-		hcd->driver->disable_usb3_lpm_timeout(hcd, udev, state);
+	    usb_device_may_initiate_lpm(udev, state)) {
+		if (usb_set_device_initiated_lpm(udev, state, true)) {
+			/*
+			 * Request to enable device initiated U1/U2 failed,
+			 * better to turn off lpm in this case.
+			 */
+			usb_set_lpm_timeout(udev, state, 0);
+			hcd->driver->disable_usb3_lpm_timeout(hcd, udev, state);
+			return;
+		}
 	}
-}
+
+	if (state == USB3_LPM_U1)
+		udev->usb3_lpm_u1_enabled = 1;
+	else if (state == USB3_LPM_U2)
+		udev->usb3_lpm_u2_enabled = 1;
+}
 /*
  * Disable the hub-initiated U1/U2 idle timeouts, and disable device-initiated
  * U1/U2 entry.
@@ -502,10 +502,6 @@ static const struct usb_device_id usb_quirk_list[] = {
 	/* DJI CineSSD */
 	{ USB_DEVICE(0x2ca3, 0x0031), .driver_info = USB_QUIRK_NO_LPM },

-	/* Fibocom L850-GL LTE Modem */
-	{ USB_DEVICE(0x2cb7, 0x0007), .driver_info =
-			USB_QUIRK_IGNORE_REMOTE_WAKEUP },
-
 	/* INTEL VALUE SSD */
 	{ USB_DEVICE(0x8086, 0xf1a5), .driver_info = USB_QUIRK_RESET_RESUME },
@@ -383,6 +383,9 @@ enum dwc2_ep0_state {
  *			0 - No (default)
  *			1 - Partial power down
  *			2 - Hibernation
+ * @no_clock_gating:	Specifies whether to avoid clock gating feature.
+ *			0 - No (use clock gating)
+ *			1 - Yes (avoid it)
  * @lpm:		Enable LPM support.
  *			0 - No
  *			1 - Yes
@@ -480,6 +483,7 @@ struct dwc2_core_params {
 #define DWC2_POWER_DOWN_PARAM_NONE 0
 #define DWC2_POWER_DOWN_PARAM_PARTIAL 1
 #define DWC2_POWER_DOWN_PARAM_HIBERNATION 2
+	bool no_clock_gating;

 	bool lpm;
 	bool lpm_clock_gating;
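For illustration, a platform that cannot tolerate clock gating would set the new flag from its params callback, exactly as the dwc2_set_s3c6400_params() hunk further down this diff does. The structures and callback below are trimmed stand-ins, not the driver's real definitions:

#include <stdbool.h>
#include <stdio.h>

/* Minimal stand-ins for the dwc2 structures touched in this hunk. */
struct dwc2_core_params {
	bool no_clock_gating;
};

struct dwc2_hsotg {
	struct dwc2_core_params params;
};

/* Hypothetical platform callback: opt out of clock gating entirely. */
static void dwc2_set_example_params(struct dwc2_hsotg *hsotg)
{
	hsotg->params.no_clock_gating = true;
}

int main(void)
{
	struct dwc2_hsotg hsotg = { { false } };

	dwc2_set_example_params(&hsotg);

	/* The suspend paths patched below check exactly this flag. */
	if (!hsotg.params.no_clock_gating)
		puts("would call dwc2_gadget_enter_clock_gating()");
	else
		puts("clock gating skipped");
	return 0;
}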
@@ -556,7 +556,8 @@ static void dwc2_handle_usb_suspend_intr(struct dwc2_hsotg *hsotg)
 			 * If neither hibernation nor partial power down are supported,
 			 * clock gating is used to save power.
 			 */
-			dwc2_gadget_enter_clock_gating(hsotg);
+			if (!hsotg->params.no_clock_gating)
+				dwc2_gadget_enter_clock_gating(hsotg);
 		}

 		/*
@@ -2749,12 +2749,14 @@ static void dwc2_hsotg_complete_in(struct dwc2_hsotg *hsotg,
 		return;
 	}

-	/* Zlp for all endpoints, for ep0 only in DATA IN stage */
+	/* Zlp for all endpoints in non DDMA, for ep0 only in DATA IN stage */
 	if (hs_ep->send_zlp) {
-		dwc2_hsotg_program_zlp(hsotg, hs_ep);
 		hs_ep->send_zlp = 0;
-		/* transfer will be completed on next complete interrupt */
-		return;
+		if (!using_desc_dma(hsotg)) {
+			dwc2_hsotg_program_zlp(hsotg, hs_ep);
+			/* transfer will be completed on next complete interrupt */
+			return;
+		}
 	}

 	if (hs_ep->index == 0 && hsotg->ep0_state == DWC2_EP0_DATA_IN) {
@@ -3900,9 +3902,27 @@ static void dwc2_hsotg_ep_stop_xfr(struct dwc2_hsotg *hsotg,
 					 __func__);
 		}
 	} else {
+		/* Mask GINTSTS_GOUTNAKEFF interrupt */
+		dwc2_hsotg_disable_gsint(hsotg, GINTSTS_GOUTNAKEFF);
+
 		if (!(dwc2_readl(hsotg, GINTSTS) & GINTSTS_GOUTNAKEFF))
 			dwc2_set_bit(hsotg, DCTL, DCTL_SGOUTNAK);

+		if (!using_dma(hsotg)) {
+			/* Wait for GINTSTS_RXFLVL interrupt */
+			if (dwc2_hsotg_wait_bit_set(hsotg, GINTSTS,
+						    GINTSTS_RXFLVL, 100)) {
+				dev_warn(hsotg->dev, "%s: timeout GINTSTS.RXFLVL\n",
+					 __func__);
+			} else {
+				/*
+				 * Pop GLOBAL OUT NAK status packet from RxFIFO
+				 * to assert GOUTNAKEFF interrupt
+				 */
+				dwc2_readl(hsotg, GRXSTSP);
+			}
+		}
+
 		/* Wait for global nak to take effect */
 		if (dwc2_hsotg_wait_bit_set(hsotg, GINTSTS,
 					    GINTSTS_GOUTNAKEFF, 100))
@@ -4348,6 +4368,9 @@ static int dwc2_hsotg_ep_sethalt(struct usb_ep *ep, int value, bool now)
 		epctl = dwc2_readl(hs, epreg);

 		if (value) {
+			/* Unmask GOUTNAKEFF interrupt */
+			dwc2_hsotg_en_gsint(hs, GINTSTS_GOUTNAKEFF);
+
 			if (!(dwc2_readl(hs, GINTSTS) & GINTSTS_GOUTNAKEFF))
 				dwc2_set_bit(hs, DCTL, DCTL_SGOUTNAK);
 			// STALL bit will be set in GOUTNAKEFF interrupt handler
@@ -3338,7 +3338,8 @@ int dwc2_port_suspend(struct dwc2_hsotg *hsotg, u16 windex)
 		 * If neither hibernation nor partial power down are supported,
 		 * clock gating is used to save power.
 		 */
-		dwc2_host_enter_clock_gating(hsotg);
+		if (!hsotg->params.no_clock_gating)
+			dwc2_host_enter_clock_gating(hsotg);
 		break;
 	}

@@ -4402,7 +4403,8 @@ static int _dwc2_hcd_suspend(struct usb_hcd *hcd)
 	 * If neither hibernation nor partial power down are supported,
 	 * clock gating is used to save power.
 	 */
-	dwc2_host_enter_clock_gating(hsotg);
+	if (!hsotg->params.no_clock_gating)
+		dwc2_host_enter_clock_gating(hsotg);

 	/* After entering suspend, hardware is not accessible */
 	clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
@@ -76,6 +76,7 @@ static void dwc2_set_s3c6400_params(struct dwc2_hsotg *hsotg)
 	struct dwc2_core_params *p = &hsotg->params;

 	p->power_down = DWC2_POWER_DOWN_PARAM_NONE;
+	p->no_clock_gating = true;
 	p->phy_utmi_width = 8;
 }
@@ -3861,6 +3861,7 @@ static int tegra_xudc_probe(struct platform_device *pdev)
 	return 0;

 free_eps:
+	pm_runtime_disable(&pdev->dev);
 	tegra_xudc_free_eps(xudc);
 free_event_ring:
 	tegra_xudc_free_event_ring(xudc);
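The one-line fix above slots pm_runtime_disable() into the probe's existing goto-unwind chain, so a failure after pm_runtime_enable() leaves the runtime-PM reference count balanced. A generic sketch of that unwind idiom, with placeholder names rather than the driver's:

#include <stdio.h>

/* Placeholder helpers standing in for the real enable/cleanup calls. */
static int setup_a(void)     { return 0; }
static int setup_b(void)     { return -1; }	/* pretend this step fails */
static void undo_a(void)     { puts("undo_a: resources released"); }
static void disable_pm(void) { puts("disable_pm: refcount rebalanced"); }

/*
 * Each failure jumps to the label that undoes everything set up so far,
 * in reverse order. The tegra-xudc fix adds the previously missing
 * pm-disable step to the first unwind label.
 */
static int example_probe(void)
{
	int err;

	err = setup_a();	/* e.g. pm_runtime_enable() + allocations */
	if (err)
		return err;

	err = setup_b();	/* a later step that can fail */
	if (err)
		goto undo;

	return 0;

undo:
	disable_pm();		/* the line the fix adds */
	undo_a();
	return err;
}

int main(void)
{
	printf("probe returned %d\n", example_probe());
	return 0;
}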
@@ -703,24 +703,28 @@ EXPORT_SYMBOL_GPL(ehci_setup);
 static irqreturn_t ehci_irq (struct usb_hcd *hcd)
 {
 	struct ehci_hcd		*ehci = hcd_to_ehci (hcd);
-	u32			status, masked_status, pcd_status = 0, cmd;
+	u32			status, current_status, masked_status, pcd_status = 0;
+	u32			cmd;
 	int			bh;

 	spin_lock(&ehci->lock);

-	status = ehci_readl(ehci, &ehci->regs->status);
+	status = 0;
+	current_status = ehci_readl(ehci, &ehci->regs->status);
+restart:

 	/* e.g. cardbus physical eject */
-	if (status == ~(u32) 0) {
+	if (current_status == ~(u32) 0) {
 		ehci_dbg (ehci, "device removed\n");
 		goto dead;
 	}
+	status |= current_status;

 	/*
 	 * We don't use STS_FLR, but some controllers don't like it to
 	 * remain on, so mask it out along with the other status bits.
 	 */
-	masked_status = status & (INTR_MASK | STS_FLR);
+	masked_status = current_status & (INTR_MASK | STS_FLR);

 	/* Shared IRQ? */
 	if (!masked_status || unlikely(ehci->rh_state == EHCI_RH_HALTED)) {
@@ -730,6 +734,12 @@ static irqreturn_t ehci_irq (struct usb_hcd *hcd)

 	/* clear (just) interrupts */
 	ehci_writel(ehci, masked_status, &ehci->regs->status);
+
+	/* For edge interrupts, don't race with an interrupt bit being raised */
+	current_status = ehci_readl(ehci, &ehci->regs->status);
+	if (current_status & INTR_MASK)
+		goto restart;
+
 	cmd = ehci_readl(ehci, &ehci->regs->command);
 	bh = 0;

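The restart loop introduced above matters for edge-triggered interrupts (e.g. MSI): a status bit raised between the read and the write-to-clear would never re-assert the line, so the handler re-reads after clearing and loops, accumulating every cause it saw into status. A self-contained sketch of the same pattern against a fake register:

#include <stdio.h>

#define INTR_MASK 0x3f

/* Fake hardware register, with a second event arriving mid-handler. */
static unsigned int fake_reg = 0x01;
static int extra_event_pending = 1;

static unsigned int reg_read(void)
{
	return fake_reg;
}

static void reg_write_to_clear(unsigned int bits)
{
	fake_reg &= ~bits;
	if (extra_event_pending) {	/* event raised during the clear */
		fake_reg |= 0x02;
		extra_event_pending = 0;
	}
}

int main(void)
{
	unsigned int status = 0, current_status, masked_status;

	current_status = reg_read();
restart:
	status |= current_status;	/* accumulate every cause seen */

	masked_status = current_status & INTR_MASK;
	reg_write_to_clear(masked_status);

	/* For edge interrupts, don't race with a bit being raised. */
	current_status = reg_read();
	if (current_status & INTR_MASK)
		goto restart;

	printf("handled status bits: 0x%02x\n", status);	/* prints 0x03 */
	return 0;
}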
@@ -153,8 +153,6 @@ struct max3421_hcd {
 	 */
 	struct urb *curr_urb;
 	enum scheduling_pass sched_pass;
-	struct usb_device *loaded_dev;	/* dev that's loaded into the chip */
-	int loaded_epnum;		/* epnum whose toggles are loaded */
 	int urb_done;			/* > 0 -> no errors, < 0: errno */
 	size_t curr_len;
 	u8 hien;
@@ -492,39 +490,17 @@ max3421_set_speed(struct usb_hcd *hcd, struct usb_device *dev)
  * Caller must NOT hold HCD spinlock.
  */
 static void
-max3421_set_address(struct usb_hcd *hcd, struct usb_device *dev, int epnum,
-		    int force_toggles)
+max3421_set_address(struct usb_hcd *hcd, struct usb_device *dev, int epnum)
 {
 	struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
-	int old_epnum, same_ep, rcvtog, sndtog;
-	struct usb_device *old_dev;
+	int rcvtog, sndtog;
 	u8 hctl;

-	old_dev = max3421_hcd->loaded_dev;
-	old_epnum = max3421_hcd->loaded_epnum;
-
-	same_ep = (dev == old_dev && epnum == old_epnum);
-	if (same_ep && !force_toggles)
-		return;
-
-	if (old_dev && !same_ep) {
-		/* save the old end-points toggles: */
-		u8 hrsl = spi_rd8(hcd, MAX3421_REG_HRSL);
-
-		rcvtog = (hrsl >> MAX3421_HRSL_RCVTOGRD_BIT) & 1;
-		sndtog = (hrsl >> MAX3421_HRSL_SNDTOGRD_BIT) & 1;
-
-		/* no locking: HCD (i.e., we) own toggles, don't we? */
-		usb_settoggle(old_dev, old_epnum, 0, rcvtog);
-		usb_settoggle(old_dev, old_epnum, 1, sndtog);
-	}
-
 	/* setup new endpoint's toggle bits: */
 	rcvtog = usb_gettoggle(dev, epnum, 0);
 	sndtog = usb_gettoggle(dev, epnum, 1);
 	hctl = (BIT(rcvtog + MAX3421_HCTL_RCVTOG0_BIT) |
 		BIT(sndtog + MAX3421_HCTL_SNDTOG0_BIT));

-	max3421_hcd->loaded_epnum = epnum;
 	spi_wr8(hcd, MAX3421_REG_HCTL, hctl);

 	/*
@@ -532,7 +508,6 @@ max3421_set_address(struct usb_hcd *hcd, struct usb_device *dev, int epnum,
 	 * address-assignment so it's best to just always load the
 	 * address whenever the end-point changed/was forced.
 	 */
-	max3421_hcd->loaded_dev = dev;
 	spi_wr8(hcd, MAX3421_REG_PERADDR, dev->devnum);
 }

@@ -667,7 +642,7 @@ max3421_select_and_start_urb(struct usb_hcd *hcd)
 	struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
 	struct urb *urb, *curr_urb = NULL;
 	struct max3421_ep *max3421_ep;
-	int epnum, force_toggles = 0;
+	int epnum;
 	struct usb_host_endpoint *ep;
 	struct list_head *pos;
 	unsigned long flags;
@@ -777,7 +752,6 @@ done:
 			usb_settoggle(urb->dev, epnum, 0, 1);
 			usb_settoggle(urb->dev, epnum, 1, 1);
 			max3421_ep->pkt_state = PKT_STATE_SETUP;
-			force_toggles = 1;
 		} else
 			max3421_ep->pkt_state = PKT_STATE_TRANSFER;
 	}
@@ -785,7 +759,7 @@ done:
 	spin_unlock_irqrestore(&max3421_hcd->lock, flags);

 	max3421_ep->last_active = max3421_hcd->frame_number;
-	max3421_set_address(hcd, urb->dev, epnum, force_toggles);
+	max3421_set_address(hcd, urb->dev, epnum);
 	max3421_set_speed(hcd, urb->dev);
 	max3421_next_transfer(hcd, 0);
 	return 1;
@@ -1380,6 +1354,16 @@ max3421_urb_done(struct usb_hcd *hcd)
 	status = 0;
 	urb = max3421_hcd->curr_urb;
 	if (urb) {
+		/* save the old end-points toggles: */
+		u8 hrsl = spi_rd8(hcd, MAX3421_REG_HRSL);
+		int rcvtog = (hrsl >> MAX3421_HRSL_RCVTOGRD_BIT) & 1;
+		int sndtog = (hrsl >> MAX3421_HRSL_SNDTOGRD_BIT) & 1;
+		int epnum = usb_endpoint_num(&urb->ep->desc);
+
+		/* no locking: HCD (i.e., we) own toggles, don't we? */
+		usb_settoggle(urb->dev, epnum, 0, rcvtog);
+		usb_settoggle(urb->dev, epnum, 1, sndtog);
+
 		max3421_hcd->curr_urb = NULL;
 		spin_lock_irqsave(&max3421_hcd->lock, flags);
 		usb_hcd_unlink_urb_from_ep(hcd, urb);
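The hunk above moves the toggle save out of max3421_set_address(), where it depended on the now-removed loaded_dev cache (a pointer that could outlive the memory it referenced), into max3421_urb_done(), where the URB and its device are still valid. A condensed sketch of that save/reload split; the bit positions and helpers are illustrative, not the chip's real register layout:

#include <stdio.h>

/* Illustrative bit positions only; not the MAX3421's real layout. */
#define RCVTOGRD_BIT 2
#define SNDTOGRD_BIT 3

static int rcv_toggle, snd_toggle;	/* per-endpoint state kept by the HCD */

/* urb_done side: capture the chip's toggles while the URB (and thus the
 * device and endpoint it points at) is guaranteed to still be alive. */
static void save_toggles(unsigned char hrsl)
{
	rcv_toggle = (hrsl >> RCVTOGRD_BIT) & 1;
	snd_toggle = (hrsl >> SNDTOGRD_BIT) & 1;
}

/* set_address side: unconditionally reprogram the saved toggles before
 * the next transfer; no cached "loaded_dev" pointer is needed anymore. */
static unsigned char toggles_to_hctl(void)
{
	return (unsigned char)((1 << rcv_toggle) | (1 << (snd_toggle + 2)));
}

int main(void)
{
	save_toggles(1 << RCVTOGRD_BIT);	/* pretend RCVTOG read back as 1 */
	printf("HCTL value to write: 0x%02x\n", toggles_to_hctl());
	return 0;
}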