mirror of git://git.yoctoproject.org/linux-yocto.git
synced 2025-10-23 07:23:12 +02:00

This is the 5.14.10 stable release

-----BEGIN PGP SIGNATURE-----

iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAmFei2oACgkQONu9yGCS
aT5o/A/9H+6b4wr5sSVbc+gk6ZI2029SxXhaCEkt7cuRSVYogzk3mcY2+wbtERbE
kmpv6Ep5taNOz1qs+YvYc07HJEVVge9VcklxnOX95mkVDr4vOFVocdO2pzxPjJCL
65tD0iXG+Z6TqLdFS5dfjyfHdV2Pp42QzMgQwC2/vpST3bhKi8fKipubUJtWIorS
fltXE2nMKa+v6uCg12yR4Tf4mx6zpraBPzECHc0Wxhm+boxPKo6sQoCdscUzI4QP
WX4tbIdODXqzCLCenp8z/SPIazcGG7z0Z5K4K3SXH4rE/3m76ZlafnhWwo8Mc6yg
/EzPVpgxg5ODjE4RE5Lh/H7ykkh9F66uOuxueJKd8q+CV2Fc7ATXn6S21+LY/hf2
qwdVvMhf3fCshpHTIfBDCA5c5o9PIpbR0VOdEsVs+nQ1XuI53TQzl7PhZtq4sUxz
L7hDkCkN3EkGVYOplqs20a70dHHGfb54S1tNuoJWv1CG9CJcCcEG74wtLeba5aAm
KGAYQxSXivv5odz++6dCJ9pEKunWb1WuDWFoF5AfX8PM/toKIv3CYQoQXLfJdp0B
hEnJx8/YkwNauQ5eoKsj/sFdWyiTKRKNwcJrXM1EdJtGSZdh/deXicu66tf/A4+k
X4syuC8zvh+2VMOECC6R+ARzPvMtNY6tG2C8mOt8gk5REHt89Qo=
=5WcR
-----END PGP SIGNATURE-----

Merge tag 'v5.14.10' into v5.14/standard/base

This is the 5.14.10 stable release

# gpg: Signature made Thu 07 Oct 2021 01:53:46 AM EDT
# gpg:                using RSA key 647F28654894E3BD457199BE38DBBDC86092693E
# gpg: Can't check signature: No public key

This commit is contained in:
commit 0eeffc5089

Makefile | 2

@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 5
 PATCHLEVEL = 14
-SUBLEVEL = 9
+SUBLEVEL = 10
 EXTRAVERSION =
 NAME = Opossums on Parade
 
@@ -186,6 +186,8 @@ ENTRY(ret_from_signal)
 	movel	%curptr@(TASK_STACK),%a1
 	tstb	%a1@(TINFO_FLAGS+2)
 	jge	1f
+	lea	%sp@(SWITCH_STACK_SIZE),%a1
+	movel	%a1,%curptr@(TASK_THREAD+THREAD_ESP0)
 	jbsr	syscall_trace
 1:	RESTORE_SWITCH_STACK
 	addql	#4,%sp

@@ -662,6 +662,11 @@ static void build_epilogue(struct jit_ctx *ctx)
 	((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative : func) : \
 	 func##_positive)
 
+static bool is_bad_offset(int b_off)
+{
+	return b_off > 0x1ffff || b_off < -0x20000;
+}
+
 static int build_body(struct jit_ctx *ctx)
 {
 	const struct bpf_prog *prog = ctx->skf;
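The bound in is_bad_offset() comes from the MIPS branch encoding: conditional branches and jumps of this form carry a signed 16-bit offset counted in instruction words, so byte displacements outside -0x20000..0x1ffff simply cannot be encoded, and the JIT must bail out with -E2BIG instead of emitting a corrupt branch. A minimal standalone sketch of the same range check (illustrative only, not part of the patch):

    #include <stdbool.h>
    #include <stdio.h>

    /* Same bounds as the patch: a signed 16-bit word offset covers
     * roughly +/-128 KiB of bytes around the branch.
     */
    static bool is_bad_offset(int b_off)
    {
        return b_off > 0x1ffff || b_off < -0x20000;
    }

    int main(void)
    {
        printf("%d\n", is_bad_offset(0x20000));  /* 1: too far forward */
        printf("%d\n", is_bad_offset(-0x20000)); /* 0: still encodable */
        return 0;
    }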
@@ -728,7 +733,10 @@ load_common:
 			/* Load return register on DS for failures */
 			emit_reg_move(r_ret, r_zero, ctx);
 			/* Return with error */
-			emit_b(b_imm(prog->len, ctx), ctx);
+			b_off = b_imm(prog->len, ctx);
+			if (is_bad_offset(b_off))
+				return -E2BIG;
+			emit_b(b_off, ctx);
 			emit_nop(ctx);
 			break;
 		case BPF_LD | BPF_W | BPF_IND:
@@ -775,8 +783,10 @@ load_ind:
 			emit_jalr(MIPS_R_RA, r_s0, ctx);
 			emit_reg_move(MIPS_R_A0, r_skb, ctx); /* delay slot */
 			/* Check the error value */
-			emit_bcond(MIPS_COND_NE, r_ret, 0,
-				   b_imm(prog->len, ctx), ctx);
+			b_off = b_imm(prog->len, ctx);
+			if (is_bad_offset(b_off))
+				return -E2BIG;
+			emit_bcond(MIPS_COND_NE, r_ret, 0, b_off, ctx);
 			emit_reg_move(r_ret, r_zero, ctx);
 			/* We are good */
 			/* X <- P[1:K] & 0xf */
@@ -855,8 +865,10 @@ load_ind:
 			/* A /= X */
 			ctx->flags |= SEEN_X | SEEN_A;
 			/* Check if r_X is zero */
-			emit_bcond(MIPS_COND_EQ, r_X, r_zero,
-				   b_imm(prog->len, ctx), ctx);
+			b_off = b_imm(prog->len, ctx);
+			if (is_bad_offset(b_off))
+				return -E2BIG;
+			emit_bcond(MIPS_COND_EQ, r_X, r_zero, b_off, ctx);
 			emit_load_imm(r_ret, 0, ctx); /* delay slot */
 			emit_div(r_A, r_X, ctx);
 			break;
@@ -864,8 +876,10 @@ load_ind:
 			/* A %= X */
 			ctx->flags |= SEEN_X | SEEN_A;
 			/* Check if r_X is zero */
-			emit_bcond(MIPS_COND_EQ, r_X, r_zero,
-				   b_imm(prog->len, ctx), ctx);
+			b_off = b_imm(prog->len, ctx);
+			if (is_bad_offset(b_off))
+				return -E2BIG;
+			emit_bcond(MIPS_COND_EQ, r_X, r_zero, b_off, ctx);
 			emit_load_imm(r_ret, 0, ctx); /* delay slot */
 			emit_mod(r_A, r_X, ctx);
 			break;
@@ -926,7 +940,10 @@ load_ind:
 			break;
 		case BPF_JMP | BPF_JA:
 			/* pc += K */
-			emit_b(b_imm(i + k + 1, ctx), ctx);
+			b_off = b_imm(i + k + 1, ctx);
+			if (is_bad_offset(b_off))
+				return -E2BIG;
+			emit_b(b_off, ctx);
 			emit_nop(ctx);
 			break;
 		case BPF_JMP | BPF_JEQ | BPF_K:
@@ -1056,12 +1073,16 @@ jmp_cmp:
 			break;
 		case BPF_RET | BPF_A:
 			ctx->flags |= SEEN_A;
-			if (i != prog->len - 1)
+			if (i != prog->len - 1) {
 				/*
 				 * If this is not the last instruction
 				 * then jump to the epilogue
 				 */
-				emit_b(b_imm(prog->len, ctx), ctx);
+				b_off = b_imm(prog->len, ctx);
+				if (is_bad_offset(b_off))
+					return -E2BIG;
+				emit_b(b_off, ctx);
+			}
 			emit_reg_move(r_ret, r_A, ctx); /* delay slot */
 			break;
 		case BPF_RET | BPF_K:
@@ -1075,7 +1096,10 @@ jmp_cmp:
 				 * If this is not the last instruction
 				 * then jump to the epilogue
 				 */
-				emit_b(b_imm(prog->len, ctx), ctx);
+				b_off = b_imm(prog->len, ctx);
+				if (is_bad_offset(b_off))
+					return -E2BIG;
+				emit_b(b_off, ctx);
 				emit_nop(ctx);
 			}
 			break;
@@ -1133,8 +1157,10 @@ jmp_cmp:
 			/* Load *dev pointer */
 			emit_load_ptr(r_s0, r_skb, off, ctx);
 			/* error (0) in the delay slot */
-			emit_bcond(MIPS_COND_EQ, r_s0, r_zero,
-				   b_imm(prog->len, ctx), ctx);
+			b_off = b_imm(prog->len, ctx);
+			if (is_bad_offset(b_off))
+				return -E2BIG;
+			emit_bcond(MIPS_COND_EQ, r_s0, r_zero, b_off, ctx);
 			emit_reg_move(r_ret, r_zero, ctx);
 			if (code == (BPF_ANC | SKF_AD_IFINDEX)) {
 				BUILD_BUG_ON(sizeof_field(struct net_device, ifindex) != 4);
@@ -1244,7 +1270,10 @@ void bpf_jit_compile(struct bpf_prog *fp)
 
 	/* Generate the actual JIT code */
 	build_prologue(&ctx);
-	build_body(&ctx);
+	if (build_body(&ctx)) {
+		module_memfree(ctx.target);
+		goto out;
+	}
 	build_epilogue(&ctx);
 
 	/* Update the icache */

@@ -3,9 +3,10 @@
 config EARLY_PRINTK
 	bool "Activate early kernel debugging"
 	default y
+	depends on TTY
 	select SERIAL_CORE_CONSOLE
 	help
-	  Enable early printk on console
+	  Enable early printk on console.
 	  This is useful for kernel debugging when your machine crashes very
 	  early before the console code is initialized.
 	  You should normally say N here, unless you want to debug such a crash.

@@ -149,8 +149,6 @@ static void __init find_limits(unsigned long *min, unsigned long *max_low,
 
 void __init setup_arch(char **cmdline_p)
 {
-	int dram_start;
-
 	console_verbose();
 
 	memory_start = memblock_start_of_DRAM();

@@ -57,7 +57,7 @@ struct ccwgroup_device *get_ccwgroupdev_by_busid(struct ccwgroup_driver *gdrv,
 						 char *bus_id);
 
 extern int ccwgroup_set_online(struct ccwgroup_device *gdev);
-extern int ccwgroup_set_offline(struct ccwgroup_device *gdev);
+int ccwgroup_set_offline(struct ccwgroup_device *gdev, bool call_gdrv);
 
 extern int ccwgroup_probe_ccwdev(struct ccw_device *cdev);
 extern void ccwgroup_remove_ccwdev(struct ccw_device *cdev);

@@ -849,7 +849,7 @@ static int xts_crypt(struct skcipher_request *req, bool encrypt)
 		return -EINVAL;
 
 	err = skcipher_walk_virt(&walk, req, false);
-	if (err)
+	if (!walk.nbytes)
 		return err;
 
 	if (unlikely(tail > 0 && walk.nbytes < walk.total)) {

@@ -263,6 +263,7 @@ static struct event_constraint intel_icl_event_constraints[] = {
 	INTEL_EVENT_CONSTRAINT_RANGE(0xa8, 0xb0, 0xf),
 	INTEL_EVENT_CONSTRAINT_RANGE(0xb7, 0xbd, 0xf),
 	INTEL_EVENT_CONSTRAINT_RANGE(0xd0, 0xe6, 0xf),
+	INTEL_EVENT_CONSTRAINT(0xef, 0xf),
 	INTEL_EVENT_CONSTRAINT_RANGE(0xf0, 0xf4, 0xf),
 	EVENT_CONSTRAINT_END
 };

@@ -46,7 +46,7 @@ struct kvm_page_track_notifier_node {
 			    struct kvm_page_track_notifier_node *node);
 };
 
-void kvm_page_track_init(struct kvm *kvm);
+int kvm_page_track_init(struct kvm *kvm);
 void kvm_page_track_cleanup(struct kvm *kvm);
 
 void kvm_page_track_free_memslot(struct kvm_memory_slot *slot);

@@ -2,6 +2,20 @@
 #ifndef _ASM_X86_KVM_CLOCK_H
 #define _ASM_X86_KVM_CLOCK_H
 
+#include <linux/percpu.h>
+
 extern struct clocksource kvm_clock;
 
+DECLARE_PER_CPU(struct pvclock_vsyscall_time_info *, hv_clock_per_cpu);
+
+static inline struct pvclock_vcpu_time_info *this_cpu_pvti(void)
+{
+	return &this_cpu_read(hv_clock_per_cpu)->pvti;
+}
+
+static inline struct pvclock_vsyscall_time_info *this_cpu_hvclock(void)
+{
+	return this_cpu_read(hv_clock_per_cpu);
+}
+
 #endif /* _ASM_X86_KVM_CLOCK_H */

@@ -49,18 +49,9 @@ early_param("no-kvmclock-vsyscall", parse_no_kvmclock_vsyscall);
 static struct pvclock_vsyscall_time_info
 			hv_clock_boot[HVC_BOOT_ARRAY_SIZE] __bss_decrypted __aligned(PAGE_SIZE);
 static struct pvclock_wall_clock wall_clock __bss_decrypted;
-static DEFINE_PER_CPU(struct pvclock_vsyscall_time_info *, hv_clock_per_cpu);
 static struct pvclock_vsyscall_time_info *hvclock_mem;
-
-static inline struct pvclock_vcpu_time_info *this_cpu_pvti(void)
-{
-	return &this_cpu_read(hv_clock_per_cpu)->pvti;
-}
-
-static inline struct pvclock_vsyscall_time_info *this_cpu_hvclock(void)
-{
-	return this_cpu_read(hv_clock_per_cpu);
-}
+DEFINE_PER_CPU(struct pvclock_vsyscall_time_info *, hv_clock_per_cpu);
+EXPORT_PER_CPU_SYMBOL_GPL(hv_clock_per_cpu);
 
 /*
  * The wallclock is the time of day when we booted. Since then, some time may

@@ -65,8 +65,8 @@ static inline struct kvm_cpuid_entry2 *cpuid_entry2_find(
 	for (i = 0; i < nent; i++) {
 		e = &entries[i];
 
-		if (e->function == function && (e->index == index ||
-		    !(e->flags & KVM_CPUID_FLAG_SIGNIFCANT_INDEX)))
+		if (e->function == function &&
+		    (!(e->flags & KVM_CPUID_FLAG_SIGNIFCANT_INDEX) || e->index == index))
 			return e;
 	}
 

@@ -435,7 +435,6 @@ static int fastop(struct x86_emulate_ctxt *ctxt, fastop_t fop);
 	__FOP_RET(#op)
 
 asm(".pushsection .fixup, \"ax\"\n"
-    ".global kvm_fastop_exception \n"
     "kvm_fastop_exception: xor %esi, %esi; ret\n"
    ".popsection");

@@ -319,8 +319,8 @@ static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val)
 	unsigned index;
 	bool mask_before, mask_after;
 	union kvm_ioapic_redirect_entry *e;
-	unsigned long vcpu_bitmap;
 	int old_remote_irr, old_delivery_status, old_dest_id, old_dest_mode;
+	DECLARE_BITMAP(vcpu_bitmap, KVM_MAX_VCPUS);
 
 	switch (ioapic->ioregsel) {
 	case IOAPIC_REG_VERSION:
@@ -384,9 +384,9 @@ static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val)
 			irq.shorthand = APIC_DEST_NOSHORT;
 			irq.dest_id = e->fields.dest_id;
 			irq.msi_redir_hint = false;
-			bitmap_zero(&vcpu_bitmap, 16);
+			bitmap_zero(vcpu_bitmap, KVM_MAX_VCPUS);
 			kvm_bitmap_or_dest_vcpus(ioapic->kvm, &irq,
-						 &vcpu_bitmap);
+						 vcpu_bitmap);
 			if (old_dest_mode != e->fields.dest_mode ||
 			    old_dest_id != e->fields.dest_id) {
 				/*
@@ -399,10 +399,10 @@ static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val)
 					kvm_lapic_irq_dest_mode(
 						!!e->fields.dest_mode);
 				kvm_bitmap_or_dest_vcpus(ioapic->kvm, &irq,
-							 &vcpu_bitmap);
+							 vcpu_bitmap);
 			}
 			kvm_make_scan_ioapic_request_mask(ioapic->kvm,
-							  &vcpu_bitmap);
+							  vcpu_bitmap);
 		} else {
 			kvm_make_scan_ioapic_request(ioapic->kvm);
 		}

@@ -163,13 +163,13 @@ void kvm_page_track_cleanup(struct kvm *kvm)
 	cleanup_srcu_struct(&head->track_srcu);
 }
 
-void kvm_page_track_init(struct kvm *kvm)
+int kvm_page_track_init(struct kvm *kvm)
 {
 	struct kvm_page_track_notifier_head *head;
 
 	head = &kvm->arch.track_notifier_head;
-	init_srcu_struct(&head->track_srcu);
 	INIT_HLIST_HEAD(&head->track_notifier_list);
+	return init_srcu_struct(&head->track_srcu);
 }
 
 /*

@@ -545,7 +545,6 @@ static void nested_vmcb02_prepare_control(struct vcpu_svm *svm)
 		(svm->nested.ctl.int_ctl & int_ctl_vmcb12_bits) |
 		(svm->vmcb01.ptr->control.int_ctl & int_ctl_vmcb01_bits);
 
-	svm->vmcb->control.virt_ext            = svm->nested.ctl.virt_ext;
 	svm->vmcb->control.int_vector          = svm->nested.ctl.int_vector;
 	svm->vmcb->control.int_state           = svm->nested.ctl.int_state;
 	svm->vmcb->control.event_inj           = svm->nested.ctl.event_inj;

@@ -596,20 +596,12 @@ static int sev_es_sync_vmsa(struct vcpu_svm *svm)
 	return 0;
 }
 
-static int sev_launch_update_vmsa(struct kvm *kvm, struct kvm_sev_cmd *argp)
+static int __sev_launch_update_vmsa(struct kvm *kvm, struct kvm_vcpu *vcpu,
+				    int *error)
 {
-	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
 	struct sev_data_launch_update_vmsa vmsa;
-	struct kvm_vcpu *vcpu;
-	int i, ret;
-
-	if (!sev_es_guest(kvm))
-		return -ENOTTY;
-
-	vmsa.reserved = 0;
-
-	kvm_for_each_vcpu(i, vcpu, kvm) {
-		struct vcpu_svm *svm = to_svm(vcpu);
+	struct vcpu_svm *svm = to_svm(vcpu);
+	int ret;
 
 	/* Perform some pre-encryption checks against the VMSA */
 	ret = sev_es_sync_vmsa(svm);
@@ -617,22 +609,37 @@ static int sev_launch_update_vmsa(struct kvm *kvm, struct kvm_sev_cmd *argp)
 		return ret;
 
 	/*
-	 * The LAUNCH_UPDATE_VMSA command will perform in-place
-	 * encryption of the VMSA memory content (i.e it will write
-	 * the same memory region with the guest's key), so invalidate
-	 * it first.
+	 * The LAUNCH_UPDATE_VMSA command will perform in-place encryption of
+	 * the VMSA memory content (i.e it will write the same memory region
+	 * with the guest's key), so invalidate it first.
 	 */
 	clflush_cache_range(svm->vmsa, PAGE_SIZE);
 
-	vmsa.handle = sev->handle;
+	vmsa.reserved = 0;
+	vmsa.handle = to_kvm_svm(kvm)->sev_info.handle;
 	vmsa.address = __sme_pa(svm->vmsa);
 	vmsa.len = PAGE_SIZE;
-	ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_VMSA, &vmsa,
-			    &argp->error);
+	return sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_VMSA, &vmsa, error);
+}
+
+static int sev_launch_update_vmsa(struct kvm *kvm, struct kvm_sev_cmd *argp)
+{
+	struct kvm_vcpu *vcpu;
+	int i, ret;
+
+	if (!sev_es_guest(kvm))
+		return -ENOTTY;
+
+	kvm_for_each_vcpu(i, vcpu, kvm) {
+		ret = mutex_lock_killable(&vcpu->mutex);
 		if (ret)
 			return ret;
 
-		svm->vcpu.arch.guest_state_protected = true;
+		ret = __sev_launch_update_vmsa(kvm, vcpu, &argp->error);
+
+		mutex_unlock(&vcpu->mutex);
+		if (ret)
+			return ret;
 	}
 
 	return 0;
@@ -1398,8 +1405,10 @@ static int sev_receive_start(struct kvm *kvm, struct kvm_sev_cmd *argp)
 
 	/* Bind ASID to this guest */
 	ret = sev_bind_asid(kvm, start.handle, error);
-	if (ret)
+	if (ret) {
+		sev_decommission(start.handle);
 		goto e_free_session;
+	}
 
 	params.handle = start.handle;
 	if (copy_to_user((void __user *)(uintptr_t)argp->data,
@@ -1465,7 +1474,7 @@ static int sev_receive_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
 
 	/* Pin guest memory */
 	guest_page = sev_pin_memory(kvm, params.guest_uaddr & PAGE_MASK,
-				    PAGE_SIZE, &n, 0);
+				    PAGE_SIZE, &n, 1);
 	if (IS_ERR(guest_page)) {
 		ret = PTR_ERR(guest_page);
 		goto e_free_trans;
@@ -1502,6 +1511,20 @@ static int sev_receive_finish(struct kvm *kvm, struct kvm_sev_cmd *argp)
 	return sev_issue_cmd(kvm, SEV_CMD_RECEIVE_FINISH, &data, &argp->error);
 }
 
+static bool cmd_allowed_from_miror(u32 cmd_id)
+{
+	/*
+	 * Allow mirrors VM to call KVM_SEV_LAUNCH_UPDATE_VMSA to enable SEV-ES
+	 * active mirror VMs. Also allow the debugging and status commands.
+	 */
+	if (cmd_id == KVM_SEV_LAUNCH_UPDATE_VMSA ||
+	    cmd_id == KVM_SEV_GUEST_STATUS || cmd_id == KVM_SEV_DBG_DECRYPT ||
+	    cmd_id == KVM_SEV_DBG_ENCRYPT)
+		return true;
+
+	return false;
+}
+
 int svm_mem_enc_op(struct kvm *kvm, void __user *argp)
 {
 	struct kvm_sev_cmd sev_cmd;
@@ -1518,8 +1541,9 @@ int svm_mem_enc_op(struct kvm *kvm, void __user *argp)
 
 	mutex_lock(&kvm->lock);
 
-	/* enc_context_owner handles all memory enc operations */
-	if (is_mirroring_enc_context(kvm)) {
+	/* Only the enc_context_owner handles some memory enc operations. */
+	if (is_mirroring_enc_context(kvm) &&
+	    !cmd_allowed_from_miror(sev_cmd.id)) {
 		r = -EINVAL;
 		goto out;
 	}
@@ -1716,8 +1740,7 @@ int svm_vm_copy_asid_from(struct kvm *kvm, unsigned int source_fd)
 {
 	struct file *source_kvm_file;
 	struct kvm *source_kvm;
-	struct kvm_sev_info *mirror_sev;
-	unsigned int asid;
+	struct kvm_sev_info source_sev, *mirror_sev;
 	int ret;
 
 	source_kvm_file = fget(source_fd);
@@ -1740,7 +1763,8 @@ int svm_vm_copy_asid_from(struct kvm *kvm, unsigned int source_fd)
 		goto e_source_unlock;
 	}
 
-	asid = to_kvm_svm(source_kvm)->sev_info.asid;
+	memcpy(&source_sev, &to_kvm_svm(source_kvm)->sev_info,
+	       sizeof(source_sev));
 
 	/*
 	 * The mirror kvm holds an enc_context_owner ref so its asid can't
@@ -1760,8 +1784,16 @@ int svm_vm_copy_asid_from(struct kvm *kvm, unsigned int source_fd)
 	/* Set enc_context_owner and copy its encryption context over */
 	mirror_sev = &to_kvm_svm(kvm)->sev_info;
 	mirror_sev->enc_context_owner = source_kvm;
-	mirror_sev->asid = asid;
 	mirror_sev->active = true;
+	mirror_sev->asid = source_sev.asid;
+	mirror_sev->fd = source_sev.fd;
+	mirror_sev->es_active = source_sev.es_active;
+	mirror_sev->handle = source_sev.handle;
+	/*
+	 * Do not copy ap_jump_table. Since the mirror does not share the same
+	 * KVM contexts as the original, and they may have different
+	 * memory-views.
+	 */
 
 	mutex_unlock(&kvm->lock);
 	return 0;

@@ -354,14 +354,20 @@ void nested_evmcs_filter_control_msr(u32 msr_index, u64 *pdata)
 	switch (msr_index) {
 	case MSR_IA32_VMX_EXIT_CTLS:
 	case MSR_IA32_VMX_TRUE_EXIT_CTLS:
-		ctl_high &= ~VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL;
+		ctl_high &= ~EVMCS1_UNSUPPORTED_VMEXIT_CTRL;
 		break;
 	case MSR_IA32_VMX_ENTRY_CTLS:
 	case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
-		ctl_high &= ~VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL;
+		ctl_high &= ~EVMCS1_UNSUPPORTED_VMENTRY_CTRL;
 		break;
 	case MSR_IA32_VMX_PROCBASED_CTLS2:
-		ctl_high &= ~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
+		ctl_high &= ~EVMCS1_UNSUPPORTED_2NDEXEC;
 		break;
+	case MSR_IA32_VMX_PINBASED_CTLS:
+		ctl_high &= ~EVMCS1_UNSUPPORTED_PINCTRL;
+		break;
+	case MSR_IA32_VMX_VMFUNC:
+		ctl_low &= ~EVMCS1_UNSUPPORTED_VMFUNC;
+		break;
 	}
 

@@ -5898,6 +5898,12 @@ static bool nested_vmx_l0_wants_exit(struct kvm_vcpu *vcpu,
 	case EXIT_REASON_VMFUNC:
 		/* VM functions are emulated through L2->L0 vmexits. */
 		return true;
+	case EXIT_REASON_BUS_LOCK:
+		/*
+		 * At present, bus lock VM exit is never exposed to L1.
+		 * Handle L2's bus locks in L0 directly.
+		 */
+		return true;
 	default:
 		break;
 	}

@@ -1840,10 +1840,11 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 				    &msr_info->data))
 			return 1;
 		/*
-		 * Enlightened VMCS v1 doesn't have certain fields, but buggy
-		 * Hyper-V versions are still trying to use corresponding
-		 * features when they are exposed. Filter out the essential
-		 * minimum.
+		 * Enlightened VMCS v1 doesn't have certain VMCS fields but
+		 * instead of just ignoring the features, different Hyper-V
+		 * versions are either trying to use them and fail or do some
+		 * sanity checking and refuse to boot. Filter all unsupported
+		 * features out.
 		 */
 		if (!msr_info->host_initiated &&
 		    vmx->nested.enlightened_vmcs_enabled)
@@ -6815,7 +6816,7 @@ static int vmx_create_vcpu(struct kvm_vcpu *vcpu)
 		 */
 		tsx_ctrl = vmx_find_uret_msr(vmx, MSR_IA32_TSX_CTRL);
 		if (tsx_ctrl)
-			vmx->guest_uret_msrs[i].mask = ~(u64)TSX_CTRL_CPUID_CLEAR;
+			tsx_ctrl->mask = ~(u64)TSX_CTRL_CPUID_CLEAR;
 	}
 
 	err = alloc_loaded_vmcs(&vmx->vmcs01);

@@ -10873,6 +10873,9 @@ void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
 
 	static_call(kvm_x86_vcpu_reset)(vcpu, init_event);
 
+	vcpu->arch.cr3 = 0;
+	kvm_register_mark_dirty(vcpu, VCPU_EXREG_CR3);
+
 	/*
 	 * Reset the MMU context if paging was enabled prior to INIT (which is
 	 * implied if CR0.PG=1 as CR0 will be '0' prior to RESET). Unlike the
@@ -11090,9 +11093,15 @@ void kvm_arch_free_vm(struct kvm *kvm)
 
 int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 {
+	int ret;
+
 	if (type)
 		return -EINVAL;
 
+	ret = kvm_page_track_init(kvm);
+	if (ret)
+		return ret;
+
 	INIT_HLIST_HEAD(&kvm->arch.mask_notifier_list);
 	INIT_LIST_HEAD(&kvm->arch.active_mmu_pages);
 	INIT_LIST_HEAD(&kvm->arch.zapped_obsolete_pages);
@@ -11125,7 +11134,6 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 
 	kvm_apicv_init(kvm);
 	kvm_hv_init_vm(kvm);
-	kvm_page_track_init(kvm);
 	kvm_mmu_init_vm(kvm);
 
 	return static_call(kvm_x86_vm_init)(kvm);

@@ -1341,9 +1341,10 @@ st: if (is_imm8(insn->off))
 		if (insn->imm == (BPF_AND | BPF_FETCH) ||
 		    insn->imm == (BPF_OR | BPF_FETCH) ||
 		    insn->imm == (BPF_XOR | BPF_FETCH)) {
-			u8 *branch_target;
 			bool is64 = BPF_SIZE(insn->code) == BPF_DW;
 			u32 real_src_reg = src_reg;
+			u32 real_dst_reg = dst_reg;
+			u8 *branch_target;
 
 			/*
 			 * Can't be implemented with a single x86 insn.
@@ -1354,11 +1355,13 @@ st: if (is_imm8(insn->off))
 			emit_mov_reg(&prog, true, BPF_REG_AX, BPF_REG_0);
 			if (src_reg == BPF_REG_0)
 				real_src_reg = BPF_REG_AX;
+			if (dst_reg == BPF_REG_0)
+				real_dst_reg = BPF_REG_AX;
 
 			branch_target = prog;
 			/* Load old value */
 			emit_ldx(&prog, BPF_SIZE(insn->code),
-				 BPF_REG_0, dst_reg, insn->off);
+				 BPF_REG_0, real_dst_reg, insn->off);
 			/*
 			 * Perform the (commutative) operation locally,
 			 * put the result in the AUX_REG.
@@ -1369,7 +1372,8 @@ st: if (is_imm8(insn->off))
 				  add_2reg(0xC0, AUX_REG, real_src_reg));
 			/* Attempt to swap in new value */
 			err = emit_atomic(&prog, BPF_CMPXCHG,
-					  dst_reg, AUX_REG, insn->off,
+					  real_dst_reg, AUX_REG,
+					  insn->off,
 					  BPF_SIZE(insn->code));
 			if (WARN_ON(err))
 				return err;
@@ -1383,7 +1387,6 @@ st: if (is_imm8(insn->off))
 			/* Restore R0 after clobbering RAX */
 			emit_mov_reg(&prog, true, BPF_REG_0, BPF_REG_AX);
 			break;
-
 		}
 
 		err = emit_atomic(&prog, insn->imm, dst_reg, src_reg,
@@ -1744,7 +1747,7 @@ static void restore_regs(const struct btf_func_model *m, u8 **prog, int nr_args,
 }
 
 static int invoke_bpf_prog(const struct btf_func_model *m, u8 **pprog,
-			   struct bpf_prog *p, int stack_size, bool mod_ret)
+			   struct bpf_prog *p, int stack_size, bool save_ret)
 {
 	u8 *prog = *pprog;
 	u8 *jmp_insn;
@@ -1777,11 +1780,15 @@ static int invoke_bpf_prog(const struct btf_func_model *m, u8 **pprog,
 	if (emit_call(&prog, p->bpf_func, prog))
 		return -EINVAL;
 
-	/* BPF_TRAMP_MODIFY_RETURN trampolines can modify the return
+	/*
+	 * BPF_TRAMP_MODIFY_RETURN trampolines can modify the return
 	 * of the previous call which is then passed on the stack to
 	 * the next BPF program.
+	 *
+	 * BPF_TRAMP_FENTRY trampoline may need to return the return
+	 * value of BPF_PROG_TYPE_STRUCT_OPS prog.
 	 */
-	if (mod_ret)
+	if (save_ret)
 		emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);
 
 	/* replace 2 nops with JE insn, since jmp target is known */
@@ -1828,13 +1835,15 @@ static int emit_cond_near_jump(u8 **pprog, void *func, void *ip, u8 jmp_cond)
 }
 
 static int invoke_bpf(const struct btf_func_model *m, u8 **pprog,
-		      struct bpf_tramp_progs *tp, int stack_size)
+		      struct bpf_tramp_progs *tp, int stack_size,
+		      bool save_ret)
 {
 	int i;
 	u8 *prog = *pprog;
 
 	for (i = 0; i < tp->nr_progs; i++) {
-		if (invoke_bpf_prog(m, &prog, tp->progs[i], stack_size, false))
+		if (invoke_bpf_prog(m, &prog, tp->progs[i], stack_size,
+				    save_ret))
 			return -EINVAL;
 	}
 	*pprog = prog;
@@ -1877,6 +1886,23 @@ static int invoke_bpf_mod_ret(const struct btf_func_model *m, u8 **pprog,
 	return 0;
 }
 
+static bool is_valid_bpf_tramp_flags(unsigned int flags)
+{
+	if ((flags & BPF_TRAMP_F_RESTORE_REGS) &&
+	    (flags & BPF_TRAMP_F_SKIP_FRAME))
+		return false;
+
+	/*
+	 * BPF_TRAMP_F_RET_FENTRY_RET is only used by bpf_struct_ops,
+	 * and it must be used alone.
+	 */
+	if ((flags & BPF_TRAMP_F_RET_FENTRY_RET) &&
+	    (flags & ~BPF_TRAMP_F_RET_FENTRY_RET))
+		return false;
+
+	return true;
+}
+
 /* Example:
  * __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev);
  * its 'struct btf_func_model' will be nr_args=2
@@ -1949,17 +1975,19 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
 	struct bpf_tramp_progs *fmod_ret = &tprogs[BPF_TRAMP_MODIFY_RETURN];
 	u8 **branches = NULL;
 	u8 *prog;
+	bool save_ret;
 
 	/* x86-64 supports up to 6 arguments. 7+ can be added in the future */
 	if (nr_args > 6)
 		return -ENOTSUPP;
 
-	if ((flags & BPF_TRAMP_F_RESTORE_REGS) &&
-	    (flags & BPF_TRAMP_F_SKIP_FRAME))
+	if (!is_valid_bpf_tramp_flags(flags))
 		return -EINVAL;
 
-	if (flags & BPF_TRAMP_F_CALL_ORIG)
-		stack_size += 8; /* room for return value of orig_call */
+	/* room for return value of orig_call or fentry prog */
+	save_ret = flags & (BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_RET_FENTRY_RET);
+	if (save_ret)
+		stack_size += 8;
 
 	if (flags & BPF_TRAMP_F_SKIP_FRAME)
 		/* skip patched call instruction and point orig_call to actual
@@ -1986,7 +2014,8 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
 	}
 
 	if (fentry->nr_progs)
-		if (invoke_bpf(m, &prog, fentry, stack_size))
+		if (invoke_bpf(m, &prog, fentry, stack_size,
+			       flags & BPF_TRAMP_F_RET_FENTRY_RET))
 			return -EINVAL;
 
 	if (fmod_ret->nr_progs) {
@@ -2033,7 +2062,7 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
 	}
 
 	if (fexit->nr_progs)
-		if (invoke_bpf(m, &prog, fexit, stack_size)) {
+		if (invoke_bpf(m, &prog, fexit, stack_size, false)) {
 			ret = -EINVAL;
 			goto cleanup;
 		}
@@ -2053,9 +2082,10 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
 			ret = -EINVAL;
 			goto cleanup;
 		}
-		/* restore original return value back into RAX */
-		emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_FP, -8);
 	}
+	/* restore return value of orig_call or fentry prog back into RAX */
+	if (save_ret)
+		emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_FP, -8);
 
 	EMIT1(0x5B); /* pop rbx */
 	EMIT1(0xC9); /* leave */

@@ -2662,15 +2662,6 @@ bfq_setup_merge(struct bfq_queue *bfqq, struct bfq_queue *new_bfqq)
 	 * are likely to increase the throughput.
 	 */
 	bfqq->new_bfqq = new_bfqq;
-	/*
-	 * The above assignment schedules the following redirections:
-	 * each time some I/O for bfqq arrives, the process that
-	 * generated that I/O is disassociated from bfqq and
-	 * associated with new_bfqq. Here we increases new_bfqq->ref
-	 * in advance, adding the number of processes that are
-	 * expected to be associated with new_bfqq as they happen to
-	 * issue I/O.
-	 */
 	new_bfqq->ref += process_refs;
 	return new_bfqq;
 }
@@ -2733,10 +2724,6 @@ bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq,
 {
 	struct bfq_queue *in_service_bfqq, *new_bfqq;
 
-	/* if a merge has already been setup, then proceed with that first */
-	if (bfqq->new_bfqq)
-		return bfqq->new_bfqq;
-
 	/*
 	 * Check delayed stable merge for rotational or non-queueing
 	 * devs. For this branch to be executed, bfqq must not be
@@ -2838,6 +2825,9 @@ bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq,
 	if (bfq_too_late_for_merging(bfqq))
 		return NULL;
 
+	if (bfqq->new_bfqq)
+		return bfqq->new_bfqq;
+
 	if (!io_struct || unlikely(bfqq == &bfqd->oom_bfqq))
 		return NULL;
 

@@ -3007,6 +3007,18 @@ static int acpi_nfit_register_region(struct acpi_nfit_desc *acpi_desc,
 		ndr_desc->target_node = NUMA_NO_NODE;
 	}
 
+	/* Fallback to address based numa information if node lookup failed */
+	if (ndr_desc->numa_node == NUMA_NO_NODE) {
+		ndr_desc->numa_node = memory_add_physaddr_to_nid(spa->address);
+		dev_info(acpi_desc->dev, "changing numa node from %d to %d for nfit region [%pa-%pa]",
+			NUMA_NO_NODE, ndr_desc->numa_node, &res.start, &res.end);
+	}
+	if (ndr_desc->target_node == NUMA_NO_NODE) {
+		ndr_desc->target_node = phys_to_target_node(spa->address);
+		dev_info(acpi_desc->dev, "changing target node from %d to %d for nfit region [%pa-%pa]",
+			NUMA_NO_NODE, ndr_desc->numa_node, &res.start, &res.end);
+	}
+
 	/*
 	 * Persistence domain bits are hierarchical, if
 	 * ACPI_NFIT_CAPABILITY_CACHE_FLUSH is set then

@@ -1721,6 +1721,25 @@ static int fw_devlink_create_devlink(struct device *con,
 	struct device *sup_dev;
 	int ret = 0;
 
+	/*
+	 * In some cases, a device P might also be a supplier to its child node
+	 * C. However, this would defer the probe of C until the probe of P
+	 * completes successfully. This is perfectly fine in the device driver
+	 * model. device_add() doesn't guarantee probe completion of the device
+	 * by the time it returns.
+	 *
+	 * However, there are a few drivers that assume C will finish probing
+	 * as soon as it's added and before P finishes probing. So, we provide
+	 * a flag to let fw_devlink know not to delay the probe of C until the
+	 * probe of P completes successfully.
+	 *
+	 * When such a flag is set, we can't create device links where P is the
+	 * supplier of C as that would delay the probe of C.
+	 */
+	if (sup_handle->flags & FWNODE_FLAG_NEEDS_CHILD_BOUND_ON_ADD &&
+	    fwnode_is_ancestor_of(sup_handle, con->fwnode))
+		return -EINVAL;
+
 	sup_dev = get_dev_from_fwnode(sup_handle);
 	if (sup_dev) {
 		/*
@@ -1771,14 +1790,21 @@ static int fw_devlink_create_devlink(struct device *con,
 	 * be broken by applying logic. Check for these types of cycles and
 	 * break them so that devices in the cycle probe properly.
 	 *
-	 * If the supplier's parent is dependent on the consumer, then
-	 * the consumer-supplier dependency is a false dependency. So,
-	 * treat it as an invalid link.
+	 * If the supplier's parent is dependent on the consumer, then the
+	 * consumer and supplier have a cyclic dependency. Since fw_devlink
+	 * can't tell which of the inferred dependencies are incorrect, don't
+	 * enforce probe ordering between any of the devices in this cyclic
+	 * dependency. Do this by relaxing all the fw_devlink device links in
+	 * this cycle and by treating the fwnode link between the consumer and
+	 * the supplier as an invalid dependency.
 	 */
 	sup_dev = fwnode_get_next_parent_dev(sup_handle);
 	if (sup_dev && device_is_dependent(con, sup_dev)) {
-		dev_dbg(con, "Not linking to %pfwP - False link\n",
-			sup_handle);
+		dev_info(con, "Fixing up cyclic dependency with %pfwP (%s)\n",
+			 sup_handle, dev_name(sup_dev));
+		device_links_write_lock();
+		fw_devlink_relax_cycle(con, sup_dev);
+		device_links_write_unlock();
 		ret = -EINVAL;
 	} else {
 		/*

@@ -97,13 +97,18 @@ struct nbd_config {
 
 	atomic_t recv_threads;
 	wait_queue_head_t recv_wq;
-	loff_t blksize;
+	unsigned int blksize_bits;
 	loff_t bytesize;
 #if IS_ENABLED(CONFIG_DEBUG_FS)
 	struct dentry *dbg_dir;
 #endif
 };
 
+static inline unsigned int nbd_blksize(struct nbd_config *config)
+{
+	return 1u << config->blksize_bits;
+}
+
 struct nbd_device {
 	struct blk_mq_tag_set tag_set;
 
@@ -147,7 +152,7 @@ static struct dentry *nbd_dbg_dir;
 
 #define NBD_MAGIC 0x68797548
 
-#define NBD_DEF_BLKSIZE 1024
+#define NBD_DEF_BLKSIZE_BITS 10
 
 static unsigned int nbds_max = 16;
 static int max_part = 16;
@@ -350,12 +355,12 @@ static int nbd_set_size(struct nbd_device *nbd, loff_t bytesize,
 			loff_t blksize)
 {
 	if (!blksize)
-		blksize = NBD_DEF_BLKSIZE;
+		blksize = 1u << NBD_DEF_BLKSIZE_BITS;
 	if (blksize < 512 || blksize > PAGE_SIZE || !is_power_of_2(blksize))
 		return -EINVAL;
 
 	nbd->config->bytesize = bytesize;
-	nbd->config->blksize = blksize;
+	nbd->config->blksize_bits = __ffs(blksize);
 
 	if (!nbd->task_recv)
 		return 0;
@@ -1370,7 +1375,7 @@ static int nbd_start_device(struct nbd_device *nbd)
 		args->index = i;
 		queue_work(nbd->recv_workq, &args->work);
 	}
-	return nbd_set_size(nbd, config->bytesize, config->blksize);
+	return nbd_set_size(nbd, config->bytesize, nbd_blksize(config));
 }
 
 static int nbd_start_device_ioctl(struct nbd_device *nbd, struct block_device *bdev)
@@ -1439,11 +1444,11 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
 	case NBD_SET_BLKSIZE:
 		return nbd_set_size(nbd, config->bytesize, arg);
 	case NBD_SET_SIZE:
-		return nbd_set_size(nbd, arg, config->blksize);
+		return nbd_set_size(nbd, arg, nbd_blksize(config));
 	case NBD_SET_SIZE_BLOCKS:
-		if (check_mul_overflow((loff_t)arg, config->blksize, &bytesize))
+		if (check_shl_overflow(arg, config->blksize_bits, &bytesize))
 			return -EINVAL;
-		return nbd_set_size(nbd, bytesize, config->blksize);
+		return nbd_set_size(nbd, bytesize, nbd_blksize(config));
 	case NBD_SET_TIMEOUT:
 		nbd_set_cmd_timeout(nbd, arg);
 		return 0;
@@ -1509,7 +1514,7 @@ static struct nbd_config *nbd_alloc_config(void)
 	atomic_set(&config->recv_threads, 0);
 	init_waitqueue_head(&config->recv_wq);
 	init_waitqueue_head(&config->conn_wait);
-	config->blksize = NBD_DEF_BLKSIZE;
+	config->blksize_bits = NBD_DEF_BLKSIZE_BITS;
 	atomic_set(&config->live_connections, 0);
 	try_module_get(THIS_MODULE);
 	return config;
@@ -1637,7 +1642,7 @@ static int nbd_dev_dbg_init(struct nbd_device *nbd)
 	debugfs_create_file("tasks", 0444, dir, nbd, &nbd_dbg_tasks_fops);
 	debugfs_create_u64("size_bytes", 0444, dir, &config->bytesize);
 	debugfs_create_u32("timeout", 0444, dir, &nbd->tag_set.timeout);
-	debugfs_create_u64("blocksize", 0444, dir, &config->blksize);
+	debugfs_create_u32("blocksize_bits", 0444, dir, &config->blksize_bits);
 	debugfs_create_file("flags", 0444, dir, nbd, &nbd_dbg_flags_fops);
 
 	return 0;
@@ -1841,7 +1846,7 @@ nbd_device_policy[NBD_DEVICE_ATTR_MAX + 1] = {
 static int nbd_genl_size_set(struct genl_info *info, struct nbd_device *nbd)
 {
 	struct nbd_config *config = nbd->config;
-	u64 bsize = config->blksize;
+	u64 bsize = nbd_blksize(config);
 	u64 bytes = config->bytesize;
 
 	if (info->attrs[NBD_ATTR_SIZE_BYTES])
@@ -1850,7 +1855,7 @@ static int nbd_genl_size_set(struct genl_info *info, struct nbd_device *nbd)
 	if (info->attrs[NBD_ATTR_BLOCK_SIZE_BYTES])
 		bsize = nla_get_u64(info->attrs[NBD_ATTR_BLOCK_SIZE_BYTES]);
 
-	if (bytes != config->bytesize || bsize != config->blksize)
+	if (bytes != config->bytesize || bsize != nbd_blksize(config))
 		return nbd_set_size(nbd, bytes, bsize);
 	return 0;
 }

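The nbd hunks above work because nbd block sizes are validated to be powers of two, so storing the exponent (blksize_bits) and recomputing the size as 1u << bits is lossless, and __ffs() of a power of two is its base-2 logarithm; check_shl_overflow() then replaces the multiply-overflow check with a shift-overflow check. A small standalone sketch of the conversion (illustrative; __builtin_ctzl stands in for the kernel's __ffs):

    #include <stdio.h>

    /* For a power of two, the index of the lowest set bit is log2. */
    static unsigned int pow2_to_bits(unsigned long blksize)
    {
        return (unsigned int)__builtin_ctzl(blksize);
    }

    int main(void)
    {
        unsigned long blksize = 1024;              /* old NBD_DEF_BLKSIZE */
        unsigned int bits = pow2_to_bits(blksize); /* 10, NBD_DEF_BLKSIZE_BITS */

        printf("bits=%u size=%lu\n", bits, 1ul << bits);
        return 0;
    }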
@@ -74,8 +74,8 @@ unsigned int gov_attr_set_put(struct gov_attr_set *attr_set, struct list_head *l
 	if (count)
 		return count;
 
-	kobject_put(&attr_set->kobj);
 	mutex_destroy(&attr_set->update_lock);
+	kobject_put(&attr_set->kobj);
 	return 0;
 }
 EXPORT_SYMBOL_GPL(gov_attr_set_put);

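The swap above matters because the final kobject_put() may release the structure that embeds update_lock, after which mutex_destroy() would touch freed memory; the lock has to be destroyed while the object is still alive. A standalone sketch of the pattern (illustrative userspace analog, not the kernel API):

    #include <pthread.h>
    #include <stdlib.h>

    /* The struct that embeds the lock is freed when the last reference
     * is dropped, so destroy the lock before that final put, never after.
     */
    struct attr_set {
        int refcount;
        pthread_mutex_t update_lock;
    };

    static void attr_set_put(struct attr_set *s)
    {
        if (--s->refcount == 0)
            free(s);                      /* object is gone after this */
    }

    int main(void)
    {
        struct attr_set *s = malloc(sizeof(*s));

        s->refcount = 1;
        pthread_mutex_init(&s->update_lock, NULL);

        pthread_mutex_destroy(&s->update_lock); /* destroy first... */
        attr_set_put(s);                        /* ...then drop the ref */
        return 0;
    }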
@@ -778,7 +778,7 @@ ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
 			   in_place ? DMA_BIDIRECTIONAL
 				    : DMA_TO_DEVICE);
 	if (ret)
-		goto e_ctx;
+		goto e_aad;
 
 	if (in_place) {
 		dst = src;
@@ -863,7 +863,7 @@ ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
 		op.u.aes.size = 0;
 		ret = cmd_q->ccp->vdata->perform->aes(&op);
 		if (ret)
-			goto e_dst;
+			goto e_final_wa;
 
 		if (aes->action == CCP_AES_ACTION_ENCRYPT) {
 			/* Put the ciphered tag after the ciphertext. */
@@ -873,17 +873,19 @@ ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
 		ret = ccp_init_dm_workarea(&tag, cmd_q, authsize,
 					   DMA_BIDIRECTIONAL);
 		if (ret)
-			goto e_tag;
+			goto e_final_wa;
 		ret = ccp_set_dm_area(&tag, 0, p_tag, 0, authsize);
-		if (ret)
-			goto e_tag;
+		if (ret) {
+			ccp_dm_free(&tag);
+			goto e_final_wa;
+		}
 
 		ret = crypto_memneq(tag.address, final_wa.address,
 				    authsize) ? -EBADMSG : 0;
 		ccp_dm_free(&tag);
 	}
 
-e_tag:
+e_final_wa:
 	ccp_dm_free(&final_wa);
 
 e_dst:

@@ -468,15 +468,8 @@ static int pca953x_gpio_get_value(struct gpio_chip *gc, unsigned off)
 	mutex_lock(&chip->i2c_lock);
 	ret = regmap_read(chip->regmap, inreg, &reg_val);
 	mutex_unlock(&chip->i2c_lock);
-	if (ret < 0) {
-		/*
-		 * NOTE:
-		 * diagnostic already emitted; that's all we should
-		 * do unless gpio_*_value_cansleep() calls become different
-		 * from their nonsleeping siblings (and report faults).
-		 */
-		return 0;
-	}
+	if (ret < 0)
+		return ret;
 
 	return !!(reg_val & bit);
 }

@@ -3602,9 +3602,9 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 
 fence_driver_init:
 	/* Fence driver */
-	r = amdgpu_fence_driver_init(adev);
+	r = amdgpu_fence_driver_sw_init(adev);
 	if (r) {
-		dev_err(adev->dev, "amdgpu_fence_driver_init failed\n");
+		dev_err(adev->dev, "amdgpu_fence_driver_sw_init failed\n");
 		amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_FENCE_INIT_FAIL, 0, 0);
 		goto failed;
 	}
@@ -3631,6 +3631,8 @@ fence_driver_init:
 		goto release_ras_con;
 	}
 
+	amdgpu_fence_driver_hw_init(adev);
+
 	dev_info(adev->dev,
 		"SE %d, SH per SE %d, CU per SH %d, active_cu_number %d\n",
 			adev->gfx.config.max_shader_engines,
@@ -3798,7 +3800,7 @@ void amdgpu_device_fini_hw(struct amdgpu_device *adev)
 		else
 			drm_atomic_helper_shutdown(adev_to_drm(adev));
 	}
-	amdgpu_fence_driver_fini_hw(adev);
+	amdgpu_fence_driver_hw_fini(adev);
 
 	if (adev->pm_sysfs_en)
 		amdgpu_pm_sysfs_fini(adev);
@@ -3820,7 +3822,7 @@ void amdgpu_device_fini_hw(struct amdgpu_device *adev)
 void amdgpu_device_fini_sw(struct amdgpu_device *adev)
 {
 	amdgpu_device_ip_fini(adev);
-	amdgpu_fence_driver_fini_sw(adev);
+	amdgpu_fence_driver_sw_fini(adev);
 	release_firmware(adev->firmware.gpu_info_fw);
 	adev->firmware.gpu_info_fw = NULL;
 	adev->accel_working = false;
@@ -3895,7 +3897,7 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
 	/* evict vram memory */
 	amdgpu_bo_evict_vram(adev);
 
-	amdgpu_fence_driver_suspend(adev);
+	amdgpu_fence_driver_hw_fini(adev);
 
 	amdgpu_device_ip_suspend_phase2(adev);
 	/* evict remaining vram memory
@@ -3940,8 +3942,7 @@ int amdgpu_device_resume(struct drm_device *dev, bool fbcon)
 		dev_err(adev->dev, "amdgpu_device_ip_resume failed (%d).\n", r);
 		return r;
 	}
-	amdgpu_fence_driver_resume(adev);
-
+	amdgpu_fence_driver_hw_init(adev);
 
 	r = amdgpu_device_ip_late_init(adev);
 	if (r)

@@ -837,6 +837,28 @@ static int convert_tiling_flags_to_modifier(struct amdgpu_framebuffer *afb)
 	return 0;
 }
 
+/* Mirrors the is_displayable check in radeonsi's gfx6_compute_surface */
+static int check_tiling_flags_gfx6(struct amdgpu_framebuffer *afb)
+{
+	u64 micro_tile_mode;
+
+	/* Zero swizzle mode means linear */
+	if (AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0)
+		return 0;
+
+	micro_tile_mode = AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE);
+	switch (micro_tile_mode) {
+	case 0: /* DISPLAY */
+	case 3: /* RENDER */
+		return 0;
+	default:
+		drm_dbg_kms(afb->base.dev,
+			    "Micro tile mode %llu not supported for scanout\n",
+			    micro_tile_mode);
+		return -EINVAL;
+	}
+}
+
 static void get_block_dimensions(unsigned int block_log2, unsigned int cpp,
 				 unsigned int *width, unsigned int *height)
 {
@@ -1103,6 +1125,7 @@ int amdgpu_display_framebuffer_init(struct drm_device *dev,
 				    const struct drm_mode_fb_cmd2 *mode_cmd,
 				    struct drm_gem_object *obj)
 {
+	struct amdgpu_device *adev = drm_to_adev(dev);
 	int ret, i;
 
 	/*
@@ -1122,6 +1145,14 @@ int amdgpu_display_framebuffer_init(struct drm_device *dev,
 	if (ret)
 		return ret;
 
+	if (!dev->mode_config.allow_fb_modifiers) {
+		drm_WARN_ONCE(dev, adev->family >= AMDGPU_FAMILY_AI,
+			      "GFX9+ requires FB check based on format modifier\n");
+		ret = check_tiling_flags_gfx6(rfb);
+		if (ret)
+			return ret;
+	}
+
 	if (dev->mode_config.allow_fb_modifiers &&
 	    !(rfb->base.flags & DRM_MODE_FB_MODIFIERS)) {
 		ret = convert_tiling_flags_to_modifier(rfb);

@@ -417,9 +417,6 @@ int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
 	}
 	amdgpu_fence_write(ring, atomic_read(&ring->fence_drv.last_seq));
 
-	if (irq_src)
-		amdgpu_irq_get(adev, irq_src, irq_type);
-
 	ring->fence_drv.irq_src = irq_src;
 	ring->fence_drv.irq_type = irq_type;
 	ring->fence_drv.initialized = true;
@@ -501,7 +498,7 @@ int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
 }
 
 /**
- * amdgpu_fence_driver_init - init the fence driver
+ * amdgpu_fence_driver_sw_init - init the fence driver
  * for all possible rings.
  *
  * @adev: amdgpu device pointer
@@ -512,20 +509,20 @@ int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
 * amdgpu_fence_driver_start_ring().
 * Returns 0 for success.
 */
-int amdgpu_fence_driver_init(struct amdgpu_device *adev)
+int amdgpu_fence_driver_sw_init(struct amdgpu_device *adev)
 {
 	return 0;
 }
 
 /**
- * amdgpu_fence_driver_fini - tear down the fence driver
+ * amdgpu_fence_driver_hw_fini - tear down the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Tear down the fence driver for all possible rings (all asics).
 */
-void amdgpu_fence_driver_fini_hw(struct amdgpu_device *adev)
+void amdgpu_fence_driver_hw_fini(struct amdgpu_device *adev)
 {
 	int i, r;
 
@@ -534,8 +531,10 @@ void amdgpu_fence_driver_hw_fini(struct amdgpu_device *adev)
 
 		if (!ring || !ring->fence_drv.initialized)
 			continue;
 
-		if (!ring->no_scheduler)
-			drm_sched_fini(&ring->sched);
+		drm_sched_stop(&ring->sched, NULL);
+
 		/* You can't wait for HW to signal if it's gone */
 		if (!drm_dev_is_unplugged(&adev->ddev))
 			r = amdgpu_fence_wait_empty(ring);
@@ -553,7 +552,7 @@ void amdgpu_fence_driver_hw_fini(struct amdgpu_device *adev)
 	}
 }
 
-void amdgpu_fence_driver_fini_sw(struct amdgpu_device *adev)
+void amdgpu_fence_driver_sw_fini(struct amdgpu_device *adev)
 {
 	unsigned int i, j;
 
@@ -563,6 +562,9 @@ void amdgpu_fence_driver_sw_fini(struct amdgpu_device *adev)
 		if (!ring || !ring->fence_drv.initialized)
 			continue;
 
+		if (!ring->no_scheduler)
+			drm_sched_fini(&ring->sched);
+
 		for (j = 0; j <= ring->fence_drv.num_fences_mask; ++j)
 			dma_fence_put(ring->fence_drv.fences[j]);
 		kfree(ring->fence_drv.fences);
@@ -572,49 +574,18 @@ void amdgpu_fence_driver_sw_fini(struct amdgpu_device *adev)
 }
 
 /**
- * amdgpu_fence_driver_suspend - suspend the fence driver
+ * amdgpu_fence_driver_hw_init - enable the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
- * Suspend the fence driver for all possible rings (all asics).
- */
-void amdgpu_fence_driver_suspend(struct amdgpu_device *adev)
-{
-	int i, r;
-
-	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
-		struct amdgpu_ring *ring = adev->rings[i];
-		if (!ring || !ring->fence_drv.initialized)
-			continue;
-
-		/* wait for gpu to finish processing current batch */
-		r = amdgpu_fence_wait_empty(ring);
-		if (r) {
-			/* delay GPU reset to resume */
-			amdgpu_fence_driver_force_completion(ring);
-		}
-
-		/* disable the interrupt */
-		if (ring->fence_drv.irq_src)
-			amdgpu_irq_put(adev, ring->fence_drv.irq_src,
-				       ring->fence_drv.irq_type);
-	}
-}
-
-/**
- * amdgpu_fence_driver_resume - resume the fence driver
- * for all possible rings.
- *
- * @adev: amdgpu device pointer
- *
- * Resume the fence driver for all possible rings (all asics).
+ * Enable the fence driver for all possible rings (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has using
 * amdgpu_fence_driver_start_ring().
 * Returns 0 for success.
 */
-void amdgpu_fence_driver_resume(struct amdgpu_device *adev)
+void amdgpu_fence_driver_hw_init(struct amdgpu_device *adev)
 {
 	int i;
 
@@ -623,6 +594,11 @@ void amdgpu_fence_driver_hw_init(struct amdgpu_device *adev)
 		if (!ring || !ring->fence_drv.initialized)
 			continue;
 
+		if (!ring->no_scheduler) {
+			drm_sched_resubmit_jobs(&ring->sched);
+			drm_sched_start(&ring->sched, true);
+		}
+
 		/* enable the interrupt */
 		if (ring->fence_drv.irq_src)
 			amdgpu_irq_get(adev, ring->fence_drv.irq_src,

@@ -106,9 +106,6 @@ struct amdgpu_fence_driver {
 	struct dma_fence **fences;
 };
 
-int amdgpu_fence_driver_init(struct amdgpu_device *adev);
-void amdgpu_fence_driver_fini_hw(struct amdgpu_device *adev);
-void amdgpu_fence_driver_fini_sw(struct amdgpu_device *adev);
 void amdgpu_fence_driver_force_completion(struct amdgpu_ring *ring);
 
 int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
@@ -117,8 +114,10 @@ int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
 int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
 				   struct amdgpu_irq_src *irq_src,
 				   unsigned irq_type);
-void amdgpu_fence_driver_suspend(struct amdgpu_device *adev);
-void amdgpu_fence_driver_resume(struct amdgpu_device *adev);
+void amdgpu_fence_driver_hw_init(struct amdgpu_device *adev);
+void amdgpu_fence_driver_hw_fini(struct amdgpu_device *adev);
+int amdgpu_fence_driver_sw_init(struct amdgpu_device *adev);
+void amdgpu_fence_driver_sw_fini(struct amdgpu_device *adev);
 int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **fence,
 		      unsigned flags);
 int amdgpu_fence_emit_polling(struct amdgpu_ring *ring, uint32_t *s,

@@ -3598,7 +3598,7 @@ static int gfx_v9_0_mqd_init(struct amdgpu_ring *ring)
 
 	/* set static priority for a queue/ring */
 	gfx_v9_0_mqd_set_priority(ring, mqd);
-	mqd->cp_hqd_quantum = RREG32(mmCP_HQD_QUANTUM);
+	mqd->cp_hqd_quantum = RREG32_SOC15(GC, 0, mmCP_HQD_QUANTUM);
 
 	/* map_queues packet doesn't need activate the queue,
 	 * so only kiq need set this field.

@@ -883,6 +883,12 @@ static int sdma_v5_2_start(struct amdgpu_device *adev)
 			msleep(1000);
 	}
 
+	/* TODO: check whether can submit a doorbell request to raise
+	 * a doorbell fence to exit gfxoff.
+	 */
+	if (adev->in_s0ix)
+		amdgpu_gfx_off_ctrl(adev, false);
+
 	sdma_v5_2_soft_reset(adev);
 	/* unhalt the MEs */
 	sdma_v5_2_enable(adev, true);
@@ -891,6 +897,8 @@ static int sdma_v5_2_start(struct amdgpu_device *adev)
 
 	/* start the gfx rings and rlc compute queues */
 	r = sdma_v5_2_gfx_resume(adev);
+	if (adev->in_s0ix)
+		amdgpu_gfx_off_ctrl(adev, true);
 	if (r)
 		return r;
 	r = sdma_v5_2_rlc_resume(adev);

@@ -1117,6 +1117,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
 
 	init_data.asic_id.pci_revision_id = adev->pdev->revision;
 	init_data.asic_id.hw_internal_rev = adev->external_rev_id;
+	init_data.asic_id.chip_id = adev->pdev->device;
 
 	init_data.asic_id.vram_width = adev->gmc.vram_width;
 	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
@@ -1724,6 +1725,7 @@ static int dm_late_init(void *handle)
 		linear_lut[i] = 0xFFFF * i / 15;
 
 	params.set = 0;
+	params.backlight_ramping_override = false;
 	params.backlight_ramping_start = 0xCCCC;
 	params.backlight_ramping_reduction = 0xCCCCCCCC;
 	params.backlight_lut_array_size = 16;

@@ -1813,14 +1813,13 @@ bool perform_link_training_with_retries(
 		if (panel_mode == DP_PANEL_MODE_EDP) {
 			struct cp_psp *cp_psp = &stream->ctx->cp_psp;
 
-			if (cp_psp && cp_psp->funcs.enable_assr) {
-				if (!cp_psp->funcs.enable_assr(cp_psp->handle, link)) {
-					/* since eDP implies ASSR on, change panel
-					 * mode to disable ASSR
+			if (cp_psp && cp_psp->funcs.enable_assr)
+				/* ASSR is bound to fail with unsigned PSP
+				 * verstage used during devlopment phase.
+				 * Report and continue with eDP panel mode to
+				 * perform eDP link training with right settings
 				 */
-					panel_mode = DP_PANEL_MODE_DEFAULT;
-				}
-			}
+				cp_psp->funcs.enable_assr(cp_psp->handle, link);
 		}
 #endif
 
@@ -861,8 +861,6 @@ void intel_rps_park(struct intel_rps *rps)
 {
 	int adj;
 
-	GEM_BUG_ON(atomic_read(&rps->num_waiters));
-
 	if (!intel_rps_clear_active(rps))
 		return;
 

@@ -576,7 +576,7 @@ retry:
 
 		/* No one is going to touch shadow bb from now on. */
 		i915_gem_object_flush_map(bb->obj);
-		i915_gem_object_unlock(bb->obj);
+		i915_gem_ww_ctx_fini(&ww);
 		}
 	}
 	return 0;
@@ -630,7 +630,7 @@ retry:
 		return ret;
 	}
 
-	i915_gem_object_unlock(wa_ctx->indirect_ctx.obj);
+	i915_gem_ww_ctx_fini(&ww);
 
 	/* FIXME: we are not tracking our pinned VMA leaving it
 	 * up to the core to fix up the stray pin_count upon

@@ -914,8 +914,6 @@ static void __i915_request_ctor(void *arg)
 	i915_sw_fence_init(&rq->submit, submit_notify);
 	i915_sw_fence_init(&rq->semaphore, semaphore_notify);
 
-	dma_fence_init(&rq->fence, &i915_fence_ops, &rq->lock, 0, 0);
-
 	rq->capture_list = NULL;
 
 	init_llist_head(&rq->execute_cb);
@@ -978,17 +976,12 @@ __i915_request_create(struct intel_context *ce, gfp_t gfp)
 	rq->ring = ce->ring;
 	rq->execution_mask = ce->engine->mask;
 
-	kref_init(&rq->fence.refcount);
-	rq->fence.flags = 0;
-	rq->fence.error = 0;
-	INIT_LIST_HEAD(&rq->fence.cb_list);
-
 	ret = intel_timeline_get_seqno(tl, rq, &seqno);
 	if (ret)
 		goto err_free;
 
-	rq->fence.context = tl->fence_context;
-	rq->fence.seqno = seqno;
+	dma_fence_init(&rq->fence, &i915_fence_ops, &rq->lock,
+		       tl->fence_context, seqno);
 
 	RCU_INIT_POINTER(rq->timeline, tl);
 	rq->hwsp_seqno = tl->hwsp_seqno;

@@ -239,13 +239,13 @@ static int amd_mp2_pci_probe(struct pci_dev *pdev, const struct pci_device_id *i
 	if (!privdata->cl_data)
 		return -ENOMEM;
 
-	rc = devm_add_action_or_reset(&pdev->dev, amd_mp2_pci_remove, privdata);
+	mp2_select_ops(privdata);
+
+	rc = amd_sfh_hid_client_init(privdata);
 	if (rc)
 		return rc;
 
-	mp2_select_ops(privdata);
-
-	return amd_sfh_hid_client_init(privdata);
+	return devm_add_action_or_reset(&pdev->dev, amd_mp2_pci_remove, privdata);
 }
 
 static const struct pci_device_id amd_mp2_pci_tbl[] = {

@@ -56,15 +56,22 @@ static int betopff_init(struct hid_device *hid)
 {
 	struct betopff_device *betopff;
 	struct hid_report *report;
-	struct hid_input *hidinput =
-		list_first_entry(&hid->inputs, struct hid_input, list);
+	struct hid_input *hidinput;
 	struct list_head *report_list =
 		&hid->report_enum[HID_OUTPUT_REPORT].report_list;
-	struct input_dev *dev = hidinput->input;
+	struct input_dev *dev;
 	int field_count = 0;
 	int error;
 	int i, j;
 
+	if (list_empty(&hid->inputs)) {
+		hid_err(hid, "no inputs found\n");
+		return -ENODEV;
+	}
+
+	hidinput = list_first_entry(&hid->inputs, struct hid_input, list);
+	dev = hidinput->input;
+
 	if (list_empty(report_list)) {
 		hid_err(hid, "no output reports found\n");
 		return -ENODEV;
@@ -198,7 +198,9 @@ static int u2fzero_rng_read(struct hwrng *rng, void *data,
 	}
 
 	ret = u2fzero_recv(dev, &req, &resp);
-	if (ret < 0)
+
+	/* ignore errors or packets without data */
+	if (ret < offsetof(struct u2f_hid_msg, init.data))
 		return 0;
 
 	/* only take the minimum amount of data it is safe to take */

@@ -505,7 +505,7 @@ static void hid_ctrl(struct urb *urb)
 
 	if (unplug) {
 		usbhid->ctrltail = usbhid->ctrlhead;
-	} else {
+	} else if (usbhid->ctrlhead != usbhid->ctrltail) {
 		usbhid->ctrltail = (usbhid->ctrltail + 1) & (HID_CONTROL_FIFO_SIZE - 1);
 
 		if (usbhid->ctrlhead != usbhid->ctrltail &&
@ -1223,9 +1223,20 @@ static void usbhid_stop(struct hid_device *hid)
|
|||
mutex_lock(&usbhid->mutex);
|
||||
|
||||
clear_bit(HID_STARTED, &usbhid->iofl);
|
||||
|
||||
spin_lock_irq(&usbhid->lock); /* Sync with error and led handlers */
|
||||
set_bit(HID_DISCONNECTED, &usbhid->iofl);
|
||||
while (usbhid->ctrltail != usbhid->ctrlhead) {
|
||||
if (usbhid->ctrl[usbhid->ctrltail].dir == USB_DIR_OUT) {
|
||||
kfree(usbhid->ctrl[usbhid->ctrltail].raw_report);
|
||||
usbhid->ctrl[usbhid->ctrltail].raw_report = NULL;
|
||||
}
|
||||
|
||||
usbhid->ctrltail = (usbhid->ctrltail + 1) &
|
||||
(HID_CONTROL_FIFO_SIZE - 1);
|
||||
}
|
||||
spin_unlock_irq(&usbhid->lock);
|
||||
|
||||
usb_kill_urb(usbhid->urbin);
|
||||
usb_kill_urb(usbhid->urbout);
|
||||
usb_kill_urb(usbhid->urbctrl);
|
||||
|
|
|
@@ -315,8 +315,8 @@ static int mlxreg_fan_set_cur_state(struct thermal_cooling_device *cdev,
 {
 	struct mlxreg_fan *fan = cdev->devdata;
 	unsigned long cur_state;
+	int i, config = 0;
 	u32 regval;
-	int i;
 	int err;

 	/*

@@ -329,6 +329,12 @@ static int mlxreg_fan_set_cur_state(struct thermal_cooling_device *cdev,
 	 * overwritten.
 	 */
 	if (state >= MLXREG_FAN_SPEED_MIN && state <= MLXREG_FAN_SPEED_MAX) {
+		/*
+		 * This is configuration change, which is only supported through sysfs.
+		 * For configuration non-zero value is to be returned to avoid thermal
+		 * statistics update.
+		 */
+		config = 1;
 		state -= MLXREG_FAN_MAX_STATE;
 		for (i = 0; i < state; i++)
 			fan->cooling_levels[i] = state;

@@ -343,7 +349,7 @@ static int mlxreg_fan_set_cur_state(struct thermal_cooling_device *cdev,

 		cur_state = MLXREG_FAN_PWM_DUTY2STATE(regval);
 		if (state < cur_state)
-			return 0;
+			return config;

 		state = cur_state;
 	}

@@ -359,7 +365,7 @@ static int mlxreg_fan_set_cur_state(struct thermal_cooling_device *cdev,
 		dev_err(fan->dev, "Failed to write PWM duty\n");
 		return err;
 	}
-	return 0;
+	return config;
 }

 static const struct thermal_cooling_device_ops mlxreg_fan_cooling_ops = {

@@ -340,18 +340,11 @@ static ssize_t occ_show_temp_10(struct device *dev,
 		if (val == OCC_TEMP_SENSOR_FAULT)
 			return -EREMOTEIO;

-		/*
-		 * VRM doesn't return temperature, only alarm bit. This
-		 * attribute maps to tempX_alarm instead of tempX_input for
-		 * VRM
-		 */
-		if (temp->fru_type != OCC_FRU_TYPE_VRM) {
-			/* sensor not ready */
-			if (val == 0)
-				return -EAGAIN;
+		/* sensor not ready */
+		if (val == 0)
+			return -EAGAIN;

-			val *= 1000;
-		}
+		val *= 1000;
 		break;
 	case 2:
 		val = temp->fru_type;

@@ -886,7 +879,7 @@ static int occ_setup_sensor_attrs(struct occ *occ)
 						     0, i);
 		attr++;

-		if (sensors->temp.version > 1 &&
+		if (sensors->temp.version == 2 &&
 		    temp->fru_type == OCC_FRU_TYPE_VRM) {
 			snprintf(attr->name, sizeof(attr->name),
 				 "temp%d_alarm", s);

@@ -54,7 +54,7 @@

 #define MP2975_RAIL2_FUNC	(PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT | \
 				 PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT | \
-				 PMBUS_PHASE_VIRTUAL)
+				 PMBUS_HAVE_POUT | PMBUS_PHASE_VIRTUAL)

 struct mp2975_data {
 	struct pmbus_driver_info info;

@@ -100,71 +100,81 @@ struct tmp421_data {
 	s16 temp[4];
 };

-static int temp_from_s16(s16 reg)
+static int temp_from_raw(u16 reg, bool extended)
 {
 	/* Mask out status bits */
 	int temp = reg & ~0xf;

-	return (temp * 1000 + 128) / 256;
+	if (extended)
+		temp = temp - 64 * 256;
+	else
+		temp = (s16)temp;
+
+	return DIV_ROUND_CLOSEST(temp * 1000, 256);
 }

-static int temp_from_u16(u16 reg)
-{
-	/* Mask out status bits */
-	int temp = reg & ~0xf;
-
-	/* Add offset for extended temperature range. */
-	temp -= 64 * 256;
-
-	return (temp * 1000 + 128) / 256;
-}
-
-static struct tmp421_data *tmp421_update_device(struct device *dev)
+static int tmp421_update_device(struct tmp421_data *data)
 {
-	struct tmp421_data *data = dev_get_drvdata(dev);
 	struct i2c_client *client = data->client;
+	int ret = 0;
 	int i;

 	mutex_lock(&data->update_lock);

 	if (time_after(jiffies, data->last_updated + (HZ / 2)) ||
 	    !data->valid) {
-		data->config = i2c_smbus_read_byte_data(client,
-			TMP421_CONFIG_REG_1);
+		ret = i2c_smbus_read_byte_data(client, TMP421_CONFIG_REG_1);
+		if (ret < 0)
+			goto exit;
+		data->config = ret;

 		for (i = 0; i < data->channels; i++) {
-			data->temp[i] = i2c_smbus_read_byte_data(client,
-				TMP421_TEMP_MSB[i]) << 8;
-			data->temp[i] |= i2c_smbus_read_byte_data(client,
-				TMP421_TEMP_LSB[i]);
+			ret = i2c_smbus_read_byte_data(client, TMP421_TEMP_MSB[i]);
+			if (ret < 0)
+				goto exit;
+			data->temp[i] = ret << 8;
+
+			ret = i2c_smbus_read_byte_data(client, TMP421_TEMP_LSB[i]);
+			if (ret < 0)
+				goto exit;
+			data->temp[i] |= ret;
 		}
 		data->last_updated = jiffies;
 		data->valid = 1;
 	}

+exit:
 	mutex_unlock(&data->update_lock);

-	return data;
+	if (ret < 0) {
+		data->valid = 0;
+		return ret;
+	}
+
+	return 0;
 }

 static int tmp421_read(struct device *dev, enum hwmon_sensor_types type,
 		       u32 attr, int channel, long *val)
 {
-	struct tmp421_data *tmp421 = tmp421_update_device(dev);
+	struct tmp421_data *tmp421 = dev_get_drvdata(dev);
+	int ret = 0;
+
+	ret = tmp421_update_device(tmp421);
+	if (ret)
+		return ret;

 	switch (attr) {
 	case hwmon_temp_input:
-		if (tmp421->config & TMP421_CONFIG_RANGE)
-			*val = temp_from_u16(tmp421->temp[channel]);
-		else
-			*val = temp_from_s16(tmp421->temp[channel]);
+		*val = temp_from_raw(tmp421->temp[channel],
+				     tmp421->config & TMP421_CONFIG_RANGE);
 		return 0;
 	case hwmon_temp_fault:
 		/*
-		 * The OPEN bit signals a fault. This is bit 0 of the temperature
-		 * register (low byte).
+		 * Any of OPEN or /PVLD bits indicate a hardware mulfunction
+		 * and the conversion result may be incorrect
 		 */
-		*val = tmp421->temp[channel] & 0x01;
+		*val = !!(tmp421->temp[channel] & 0x03);
 		return 0;
 	default:
 		return -EOPNOTSUPP;

@@ -177,9 +187,6 @@ static umode_t tmp421_is_visible(const void *data, enum hwmon_sensor_types type,
 {
 	switch (attr) {
 	case hwmon_temp_fault:
-		if (channel == 0)
-			return 0;
-		return 0444;
 	case hwmon_temp_input:
 		return 0444;
 	default:

@@ -273,9 +273,6 @@ struct w83791d_data {
 	char valid;			/* !=0 if following fields are valid */
 	unsigned long last_updated;	/* In jiffies */

-	/* array of 2 pointers to subclients */
-	struct i2c_client *lm75[2];
-
 	/* volts */
 	u8 in[NUMBER_OF_VIN];		/* Register value */
 	u8 in_max[NUMBER_OF_VIN];	/* Register value */

@@ -1257,7 +1254,6 @@ static const struct attribute_group w83791d_group_fanpwm45 = {
 static int w83791d_detect_subclients(struct i2c_client *client)
 {
 	struct i2c_adapter *adapter = client->adapter;
-	struct w83791d_data *data = i2c_get_clientdata(client);
 	int address = client->addr;
 	int i, id;
 	u8 val;

@@ -1280,21 +1276,18 @@ static int w83791d_detect_subclients(struct i2c_client *client)
 	}

 	val = w83791d_read(client, W83791D_REG_I2C_SUBADDR);
-	if (!(val & 0x08))
-		data->lm75[0] = devm_i2c_new_dummy_device(&client->dev, adapter,
-							  0x48 + (val & 0x7));
-	if (!(val & 0x80)) {
-		if (!IS_ERR(data->lm75[0]) &&
-				((val & 0x7) == ((val >> 4) & 0x7))) {
-			dev_err(&client->dev,
-				"duplicate addresses 0x%x, "
-				"use force_subclient\n",
-				data->lm75[0]->addr);
-			return -ENODEV;
-		}
-		data->lm75[1] = devm_i2c_new_dummy_device(&client->dev, adapter,
-							  0x48 + ((val >> 4) & 0x7));
+
+	if (!(val & 0x88) && (val & 0x7) == ((val >> 4) & 0x7)) {
+		dev_err(&client->dev,
+			"duplicate addresses 0x%x, use force_subclient\n", 0x48 + (val & 0x7));
+		return -ENODEV;
 	}

+	if (!(val & 0x08))
+		devm_i2c_new_dummy_device(&client->dev, adapter, 0x48 + (val & 0x7));
+
+	if (!(val & 0x80))
+		devm_i2c_new_dummy_device(&client->dev, adapter, 0x48 + ((val >> 4) & 0x7));
+
 	return 0;
 }

@@ -264,9 +264,6 @@ struct w83792d_data {
 	char valid;		/* !=0 if following fields are valid */
 	unsigned long last_updated;	/* In jiffies */

-	/* array of 2 pointers to subclients */
-	struct i2c_client *lm75[2];
-
 	u8 in[9];		/* Register value */
 	u8 in_max[9];		/* Register value */
 	u8 in_min[9];		/* Register value */

@@ -927,7 +924,6 @@ w83792d_detect_subclients(struct i2c_client *new_client)
 	int address = new_client->addr;
 	u8 val;
 	struct i2c_adapter *adapter = new_client->adapter;
-	struct w83792d_data *data = i2c_get_clientdata(new_client);

 	id = i2c_adapter_id(adapter);
 	if (force_subclients[0] == id && force_subclients[1] == address) {

@@ -946,20 +942,18 @@ w83792d_detect_subclients(struct i2c_client *new_client)
 	}

 	val = w83792d_read_value(new_client, W83792D_REG_I2C_SUBADDR);
-	if (!(val & 0x08))
-		data->lm75[0] = devm_i2c_new_dummy_device(&new_client->dev, adapter,
-							  0x48 + (val & 0x7));
-	if (!(val & 0x80)) {
-		if (!IS_ERR(data->lm75[0]) &&
-			((val & 0x7) == ((val >> 4) & 0x7))) {
-			dev_err(&new_client->dev,
-				"duplicate addresses 0x%x, use force_subclient\n",
-				data->lm75[0]->addr);
-			return -ENODEV;
-		}
-		data->lm75[1] = devm_i2c_new_dummy_device(&new_client->dev, adapter,
-							  0x48 + ((val >> 4) & 0x7));
+
+	if (!(val & 0x88) && (val & 0x7) == ((val >> 4) & 0x7)) {
+		dev_err(&new_client->dev,
+			"duplicate addresses 0x%x, use force_subclient\n", 0x48 + (val & 0x7));
+		return -ENODEV;
 	}

+	if (!(val & 0x08))
+		devm_i2c_new_dummy_device(&new_client->dev, adapter, 0x48 + (val & 0x7));
+
+	if (!(val & 0x80))
+		devm_i2c_new_dummy_device(&new_client->dev, adapter, 0x48 + ((val >> 4) & 0x7));
+
 	return 0;
 }

@@ -202,7 +202,6 @@ static inline s8 TEMP_TO_REG(long val, s8 min, s8 max)
 }

 struct w83793_data {
-	struct i2c_client *lm75[2];
 	struct device *hwmon_dev;
 	struct mutex update_lock;
 	char valid;			/* !=0 if following fields are valid */

@@ -1566,7 +1565,6 @@ w83793_detect_subclients(struct i2c_client *client)
 	int address = client->addr;
 	u8 tmp;
 	struct i2c_adapter *adapter = client->adapter;
-	struct w83793_data *data = i2c_get_clientdata(client);

 	id = i2c_adapter_id(adapter);
 	if (force_subclients[0] == id && force_subclients[1] == address) {

@@ -1586,20 +1584,18 @@ w83793_detect_subclients(struct i2c_client *client)
 	}

 	tmp = w83793_read_value(client, W83793_REG_I2C_SUBADDR);
-	if (!(tmp & 0x08))
-		data->lm75[0] = devm_i2c_new_dummy_device(&client->dev, adapter,
-							  0x48 + (tmp & 0x7));
-	if (!(tmp & 0x80)) {
-		if (!IS_ERR(data->lm75[0])
-		    && ((tmp & 0x7) == ((tmp >> 4) & 0x7))) {
-			dev_err(&client->dev,
-				"duplicate addresses 0x%x, "
-				"use force_subclients\n", data->lm75[0]->addr);
-			return -ENODEV;
-		}
-		data->lm75[1] = devm_i2c_new_dummy_device(&client->dev, adapter,
-							  0x48 + ((tmp >> 4) & 0x7));
+
+	if (!(tmp & 0x88) && (tmp & 0x7) == ((tmp >> 4) & 0x7)) {
+		dev_err(&client->dev,
+			"duplicate addresses 0x%x, use force_subclient\n", 0x48 + (tmp & 0x7));
+		return -ENODEV;
 	}

+	if (!(tmp & 0x08))
+		devm_i2c_new_dummy_device(&client->dev, adapter, 0x48 + (tmp & 0x7));
+
+	if (!(tmp & 0x80))
+		devm_i2c_new_dummy_device(&client->dev, adapter, 0x48 + ((tmp >> 4) & 0x7));
+
 	return 0;
 }

@@ -1746,15 +1746,16 @@ static void cma_cancel_route(struct rdma_id_private *id_priv)
 	}
 }

-static void cma_cancel_listens(struct rdma_id_private *id_priv)
+static void _cma_cancel_listens(struct rdma_id_private *id_priv)
 {
 	struct rdma_id_private *dev_id_priv;

+	lockdep_assert_held(&lock);
+
 	/*
 	 * Remove from listen_any_list to prevent added devices from spawning
 	 * additional listen requests.
 	 */
-	mutex_lock(&lock);
 	list_del(&id_priv->list);

 	while (!list_empty(&id_priv->listen_list)) {

@@ -1768,6 +1769,12 @@ static void cma_cancel_listens(struct rdma_id_private *id_priv)
 		rdma_destroy_id(&dev_id_priv->id);
 		mutex_lock(&lock);
 	}
+}
+
+static void cma_cancel_listens(struct rdma_id_private *id_priv)
+{
+	mutex_lock(&lock);
+	_cma_cancel_listens(id_priv);
 	mutex_unlock(&lock);
 }

@@ -1776,6 +1783,14 @@ static void cma_cancel_operation(struct rdma_id_private *id_priv,
 {
 	switch (state) {
 	case RDMA_CM_ADDR_QUERY:
+		/*
+		 * We can avoid doing the rdma_addr_cancel() based on state,
+		 * only RDMA_CM_ADDR_QUERY has a work that could still execute.
+		 * Notice that the addr_handler work could still be exiting
+		 * outside this state, however due to the interaction with the
+		 * handler_mutex the work is guaranteed not to touch id_priv
+		 * during exit.
+		 */
 		rdma_addr_cancel(&id_priv->id.route.addr.dev_addr);
 		break;
 	case RDMA_CM_ROUTE_QUERY:

@@ -1810,6 +1825,8 @@ static void cma_release_port(struct rdma_id_private *id_priv)
 static void destroy_mc(struct rdma_id_private *id_priv,
 		       struct cma_multicast *mc)
 {
+	bool send_only = mc->join_state == BIT(SENDONLY_FULLMEMBER_JOIN);
+
 	if (rdma_cap_ib_mcast(id_priv->id.device, id_priv->id.port_num))
 		ib_sa_free_multicast(mc->sa_mc);

@@ -1826,7 +1843,10 @@ static void destroy_mc(struct rdma_id_private *id_priv,

 			cma_set_mgid(id_priv, (struct sockaddr *)&mc->addr,
 				     &mgid);
-			cma_igmp_send(ndev, &mgid, false);
+
+			if (!send_only)
+				cma_igmp_send(ndev, &mgid, false);
+
 			dev_put(ndev);
 		}

@@ -2574,7 +2594,7 @@ static int cma_listen_on_all(struct rdma_id_private *id_priv)
 	return 0;

 err_listen:
-	list_del(&id_priv->list);
+	_cma_cancel_listens(id_priv);
 	mutex_unlock(&lock);
 	if (to_destroy)
 		rdma_destroy_id(&to_destroy->id);

@@ -3410,6 +3430,21 @@ int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
 	if (dst_addr->sa_family == AF_IB) {
 		ret = cma_resolve_ib_addr(id_priv);
 	} else {
+		/*
+		 * The FSM can return back to RDMA_CM_ADDR_BOUND after
+		 * rdma_resolve_ip() is called, eg through the error
+		 * path in addr_handler(). If this happens the existing
+		 * request must be canceled before issuing a new one.
+		 * Since canceling a request is a bit slow and this
+		 * oddball path is rare, keep track once a request has
+		 * been issued. The track turns out to be a permanent
+		 * state since this is the only cancel as it is
+		 * immediately before rdma_resolve_ip().
+		 */
+		if (id_priv->used_resolve_ip)
+			rdma_addr_cancel(&id->route.addr.dev_addr);
+		else
+			id_priv->used_resolve_ip = 1;
 		ret = rdma_resolve_ip(cma_src_addr(id_priv), dst_addr,
 				      &id->route.addr.dev_addr,
 				      timeout_ms, addr_handler,

@@ -3768,9 +3803,13 @@ int rdma_listen(struct rdma_cm_id *id, int backlog)
 	int ret;

 	if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_LISTEN)) {
+		struct sockaddr_in any_in = {
+			.sin_family = AF_INET,
+			.sin_addr.s_addr = htonl(INADDR_ANY),
+		};
+
 		/* For a well behaved ULP state will be RDMA_CM_IDLE */
-		id->route.addr.src_addr.ss_family = AF_INET;
-		ret = rdma_bind_addr(id, cma_src_addr(id_priv));
+		ret = rdma_bind_addr(id, (struct sockaddr *)&any_in);
 		if (ret)
 			return ret;
 		if (WARN_ON(!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND,

@@ -91,6 +91,7 @@ struct rdma_id_private {
 	u8 afonly;
 	u8 timeout;
 	u8 min_rnr_timer;
+	u8 used_resolve_ip;
 	enum ib_gid_type gid_type;

 	/*

@@ -873,14 +873,14 @@ void hfi1_ipoib_tx_timeout(struct net_device *dev, unsigned int q)
 	struct hfi1_ipoib_txq *txq = &priv->txqs[q];
 	u64 completed = atomic64_read(&txq->complete_txreqs);

-	dd_dev_info(priv->dd, "timeout txq %llx q %u stopped %u stops %d no_desc %d ring_full %d\n",
-		    (unsigned long long)txq, q,
+	dd_dev_info(priv->dd, "timeout txq %p q %u stopped %u stops %d no_desc %d ring_full %d\n",
+		    txq, q,
 		    __netif_subqueue_stopped(dev, txq->q_idx),
 		    atomic_read(&txq->stops),
 		    atomic_read(&txq->no_desc),
 		    atomic_read(&txq->ring_full));
-	dd_dev_info(priv->dd, "sde %llx engine %u\n",
-		    (unsigned long long)txq->sde,
+	dd_dev_info(priv->dd, "sde %p engine %u\n",
+		    txq->sde,
 		    txq->sde ? txq->sde->this_idx : 0);
 	dd_dev_info(priv->dd, "flow %x\n", txq->flow.as_int);
 	dd_dev_info(priv->dd, "sent %llu completed %llu used %llu\n",

@@ -326,19 +326,30 @@ static void set_cq_param(struct hns_roce_cq *hr_cq, u32 cq_entries, int vector,
 	INIT_LIST_HEAD(&hr_cq->rq_list);
 }

-static void set_cqe_size(struct hns_roce_cq *hr_cq, struct ib_udata *udata,
+static int set_cqe_size(struct hns_roce_cq *hr_cq, struct ib_udata *udata,
 			struct hns_roce_ib_create_cq *ucmd)
 {
 	struct hns_roce_dev *hr_dev = to_hr_dev(hr_cq->ib_cq.device);

-	if (udata) {
-		if (udata->inlen >= offsetofend(typeof(*ucmd), cqe_size))
-			hr_cq->cqe_size = ucmd->cqe_size;
-		else
-			hr_cq->cqe_size = HNS_ROCE_V2_CQE_SIZE;
-	} else {
+	if (!udata) {
 		hr_cq->cqe_size = hr_dev->caps.cqe_sz;
+		return 0;
 	}

+	if (udata->inlen >= offsetofend(typeof(*ucmd), cqe_size)) {
+		if (ucmd->cqe_size != HNS_ROCE_V2_CQE_SIZE &&
+		    ucmd->cqe_size != HNS_ROCE_V3_CQE_SIZE) {
+			ibdev_err(&hr_dev->ib_dev,
+				  "invalid cqe size %u.\n", ucmd->cqe_size);
+			return -EINVAL;
+		}
+
+		hr_cq->cqe_size = ucmd->cqe_size;
+	} else {
+		hr_cq->cqe_size = HNS_ROCE_V2_CQE_SIZE;
+	}
+
+	return 0;
 }

 int hns_roce_create_cq(struct ib_cq *ib_cq, const struct ib_cq_init_attr *attr,

@@ -366,7 +377,9 @@ int hns_roce_create_cq(struct ib_cq *ib_cq, const struct ib_cq_init_attr *attr,

 	set_cq_param(hr_cq, attr->cqe, attr->comp_vector, &ucmd);

-	set_cqe_size(hr_cq, udata, &ucmd);
+	ret = set_cqe_size(hr_cq, udata, &ucmd);
+	if (ret)
+		return ret;

 	ret = alloc_cq_buf(hr_dev, hr_cq, udata, ucmd.buf_addr);
 	if (ret) {

@@ -3306,7 +3306,7 @@ static void __hns_roce_v2_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn,
 			dest = get_cqe_v2(hr_cq, (prod_index + nfreed) &
 					  hr_cq->ib_cq.cqe);
 			owner_bit = hr_reg_read(dest, CQE_OWNER);
-			memcpy(dest, cqe, sizeof(*cqe));
+			memcpy(dest, cqe, hr_cq->cqe_size);
 			hr_reg_write(dest, CQE_OWNER, owner_bit);
 		}
 	}

@@ -4411,7 +4411,12 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
 	hr_qp->path_mtu = ib_mtu;

 	mtu = ib_mtu_enum_to_int(ib_mtu);
-	if (WARN_ON(mtu < 0))
+	if (WARN_ON(mtu <= 0))
+		return -EINVAL;
+#define MAX_LP_MSG_LEN 65536
+	/* MTU * (2 ^ LP_PKTN_INI) shouldn't be bigger than 64KB */
+	lp_pktn_ini = ilog2(MAX_LP_MSG_LEN / mtu);
+	if (WARN_ON(lp_pktn_ini >= 0xF))
 		return -EINVAL;

 	if (attr_mask & IB_QP_PATH_MTU) {

@@ -4419,10 +4424,6 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
 		hr_reg_clear(qpc_mask, QPC_MTU);
 	}

-#define MAX_LP_MSG_LEN 65536
-	/* MTU * (2 ^ LP_PKTN_INI) shouldn't be bigger than 64KB */
-	lp_pktn_ini = ilog2(MAX_LP_MSG_LEN / mtu);
-
 	hr_reg_write(context, QPC_LP_PKTN_INI, lp_pktn_ini);
 	hr_reg_clear(qpc_mask, QPC_LP_PKTN_INI);

@@ -3496,7 +3496,7 @@ static void irdma_cm_disconn_true(struct irdma_qp *iwqp)
 	    original_hw_tcp_state == IRDMA_TCP_STATE_TIME_WAIT ||
 	    last_ae == IRDMA_AE_RDMAP_ROE_BAD_LLP_CLOSE ||
 	    last_ae == IRDMA_AE_BAD_CLOSE ||
-	    last_ae == IRDMA_AE_LLP_CONNECTION_RESET || iwdev->reset)) {
+	    last_ae == IRDMA_AE_LLP_CONNECTION_RESET || iwdev->rf->reset)) {
 		issue_close = 1;
 		iwqp->cm_id = NULL;
 		qp->term_flags = 0;

@@ -4250,7 +4250,7 @@ void irdma_cm_teardown_connections(struct irdma_device *iwdev, u32 *ipaddr,
 				       teardown_entry);
 		attr.qp_state = IB_QPS_ERR;
 		irdma_modify_qp(&cm_node->iwqp->ibqp, &attr, IB_QP_STATE, NULL);
-		if (iwdev->reset)
+		if (iwdev->rf->reset)
 			irdma_cm_disconn(cm_node->iwqp);
 		irdma_rem_ref_cm_node(cm_node);
 	}

@@ -176,6 +176,14 @@ static void irdma_set_flush_fields(struct irdma_sc_qp *qp,
 	case IRDMA_AE_LLP_RECEIVED_MPA_CRC_ERROR:
 		qp->flush_code = FLUSH_GENERAL_ERR;
 		break;
+	case IRDMA_AE_LLP_TOO_MANY_RETRIES:
+		qp->flush_code = FLUSH_RETRY_EXC_ERR;
+		break;
+	case IRDMA_AE_AMP_MWBIND_INVALID_RIGHTS:
+	case IRDMA_AE_AMP_MWBIND_BIND_DISABLED:
+	case IRDMA_AE_AMP_MWBIND_INVALID_BOUNDS:
+		qp->flush_code = FLUSH_MW_BIND_ERR;
+		break;
 	default:
 		qp->flush_code = FLUSH_FATAL_ERR;
 		break;

@@ -1489,7 +1497,7 @@ void irdma_reinitialize_ieq(struct irdma_sc_vsi *vsi)

 	irdma_puda_dele_rsrc(vsi, IRDMA_PUDA_RSRC_TYPE_IEQ, false);
 	if (irdma_initialize_ieq(iwdev)) {
-		iwdev->reset = true;
+		iwdev->rf->reset = true;
 		rf->gen_ops.request_reset(rf);
 	}
 }

@@ -1632,13 +1640,13 @@ void irdma_rt_deinit_hw(struct irdma_device *iwdev)
 	case IEQ_CREATED:
 		if (!iwdev->roce_mode)
 			irdma_puda_dele_rsrc(&iwdev->vsi, IRDMA_PUDA_RSRC_TYPE_IEQ,
-					     iwdev->reset);
+					     iwdev->rf->reset);
 		fallthrough;
 	case ILQ_CREATED:
 		if (!iwdev->roce_mode)
 			irdma_puda_dele_rsrc(&iwdev->vsi,
 					     IRDMA_PUDA_RSRC_TYPE_ILQ,
-					     iwdev->reset);
+					     iwdev->rf->reset);
 		break;
 	default:
 		ibdev_warn(&iwdev->ibdev, "bad init_state = %d\n", iwdev->init_state);

@@ -55,7 +55,7 @@ static void i40iw_close(struct i40e_info *cdev_info, struct i40e_client *client,

 	iwdev = to_iwdev(ibdev);
 	if (reset)
-		iwdev->reset = true;
+		iwdev->rf->reset = true;

 	iwdev->iw_status = 0;
 	irdma_port_ibevent(iwdev);

@@ -346,7 +346,6 @@ struct irdma_device {
 	bool roce_mode:1;
 	bool roce_dcqcn_en:1;
 	bool dcb:1;
-	bool reset:1;
 	bool iw_ooo:1;
 	enum init_completion_state init_state;

@@ -102,6 +102,8 @@ enum irdma_flush_opcode {
 	FLUSH_REM_OP_ERR,
 	FLUSH_LOC_LEN_ERR,
 	FLUSH_FATAL_ERR,
+	FLUSH_RETRY_EXC_ERR,
+	FLUSH_MW_BIND_ERR,
 };

 enum irdma_cmpl_status {

@@ -2510,7 +2510,7 @@ void irdma_modify_qp_to_err(struct irdma_sc_qp *sc_qp)
 	struct irdma_qp *qp = sc_qp->qp_uk.back_qp;
 	struct ib_qp_attr attr;

-	if (qp->iwdev->reset)
+	if (qp->iwdev->rf->reset)
 		return;
 	attr.qp_state = IB_QPS_ERR;

@@ -535,7 +535,6 @@ static int irdma_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
 	irdma_qp_rem_ref(&iwqp->ibqp);
 	wait_for_completion(&iwqp->free_qp);
 	irdma_free_lsmm_rsrc(iwqp);
-	if (!iwdev->reset)
-		irdma_cqp_qp_destroy_cmd(&iwdev->rf->sc_dev, &iwqp->sc_qp);
+	irdma_cqp_qp_destroy_cmd(&iwdev->rf->sc_dev, &iwqp->sc_qp);

 	if (!iwqp->user_mode) {

@@ -2041,7 +2040,7 @@ static int irdma_create_cq(struct ib_cq *ibcq,
 		/* Kmode allocations */
 		int rsize;

-		if (entries > rf->max_cqe) {
+		if (entries < 1 || entries > rf->max_cqe) {
 			err_code = -EINVAL;
 			goto cq_free_rsrc;
 		}

@@ -3359,6 +3358,10 @@ static enum ib_wc_status irdma_flush_err_to_ib_wc_status(enum irdma_flush_opcode
 		return IB_WC_LOC_LEN_ERR;
 	case FLUSH_GENERAL_ERR:
 		return IB_WC_WR_FLUSH_ERR;
+	case FLUSH_RETRY_EXC_ERR:
+		return IB_WC_RETRY_EXC_ERR;
+	case FLUSH_MW_BIND_ERR:
+		return IB_WC_MW_BIND_ERR;
 	case FLUSH_FATAL_ERR:
 	default:
 		return IB_WC_FATAL_ERR;

@@ -44,9 +44,9 @@
 #define NOC_PERM_MODE_BYPASS		(1 << NOC_QOS_MODE_BYPASS)

 #define NOC_QOS_PRIORITYn_ADDR(n)	(0x8 + (n * 0x1000))
-#define NOC_QOS_PRIORITY_MASK		0xf
+#define NOC_QOS_PRIORITY_P1_MASK	0xc
+#define NOC_QOS_PRIORITY_P0_MASK	0x3
 #define NOC_QOS_PRIORITY_P1_SHIFT	0x2
-#define NOC_QOS_PRIORITY_P0_SHIFT	0x3

 #define NOC_QOS_MODEn_ADDR(n)		(0xc + (n * 0x1000))
 #define NOC_QOS_MODEn_MASK		0x3

@@ -307,7 +307,7 @@ DEFINE_QNODE(slv_bimc_cfg, SDM660_SLAVE_BIMC_CFG, 4, -1, 56, true, -1, 0, -1, 0)
 DEFINE_QNODE(slv_prng, SDM660_SLAVE_PRNG, 4, -1, 44, true, -1, 0, -1, 0);
 DEFINE_QNODE(slv_spdm, SDM660_SLAVE_SPDM, 4, -1, 60, true, -1, 0, -1, 0);
 DEFINE_QNODE(slv_qdss_cfg, SDM660_SLAVE_QDSS_CFG, 4, -1, 63, true, -1, 0, -1, 0);
-DEFINE_QNODE(slv_cnoc_mnoc_cfg, SDM660_SLAVE_BLSP_1, 4, -1, 66, true, -1, 0, -1, SDM660_MASTER_CNOC_MNOC_CFG);
+DEFINE_QNODE(slv_cnoc_mnoc_cfg, SDM660_SLAVE_CNOC_MNOC_CFG, 4, -1, 66, true, -1, 0, -1, SDM660_MASTER_CNOC_MNOC_CFG);
 DEFINE_QNODE(slv_snoc_cfg, SDM660_SLAVE_SNOC_CFG, 4, -1, 70, true, -1, 0, -1, 0);
 DEFINE_QNODE(slv_qm_cfg, SDM660_SLAVE_QM_CFG, 4, -1, 212, true, -1, 0, -1, 0);
 DEFINE_QNODE(slv_clk_ctl, SDM660_SLAVE_CLK_CTL, 4, -1, 47, true, -1, 0, -1, 0);

@@ -624,13 +624,12 @@ static int qcom_icc_noc_set_qos_priority(struct regmap *rmap,
 	/* Must be updated one at a time, P1 first, P0 last */
 	val = qos->areq_prio << NOC_QOS_PRIORITY_P1_SHIFT;
 	rc = regmap_update_bits(rmap, NOC_QOS_PRIORITYn_ADDR(qos->qos_port),
-				NOC_QOS_PRIORITY_MASK, val);
+				NOC_QOS_PRIORITY_P1_MASK, val);
 	if (rc)
 		return rc;

-	val = qos->prio_level << NOC_QOS_PRIORITY_P0_SHIFT;
 	return regmap_update_bits(rmap, NOC_QOS_PRIORITYn_ADDR(qos->qos_port),
-				  NOC_QOS_PRIORITY_MASK, val);
+				  NOC_QOS_PRIORITY_P0_MASK, qos->prio_level);
 }

 static int qcom_icc_set_noc_qos(struct icc_node *src, u64 max_bw)

@@ -33,6 +33,7 @@ struct ipoctal_channel {
 	unsigned int			pointer_read;
 	unsigned int			pointer_write;
 	struct tty_port			tty_port;
+	bool				tty_registered;
 	union scc2698_channel __iomem	*regs;
 	union scc2698_block __iomem	*block_regs;
 	unsigned int			board_id;

@@ -81,22 +82,34 @@ static int ipoctal_port_activate(struct tty_port *port, struct tty_struct *tty)
 	return 0;
 }

-static int ipoctal_open(struct tty_struct *tty, struct file *file)
+static int ipoctal_install(struct tty_driver *driver, struct tty_struct *tty)
 {
 	struct ipoctal_channel *channel = dev_get_drvdata(tty->dev);
 	struct ipoctal *ipoctal = chan_to_ipoctal(channel, tty->index);
-	int err;
-
-	tty->driver_data = channel;
+	int res;

 	if (!ipack_get_carrier(ipoctal->dev))
 		return -EBUSY;

-	err = tty_port_open(&channel->tty_port, tty, file);
-	if (err)
+	res = tty_standard_install(driver, tty);
+	if (res)
 		goto err_put_carrier;

+	tty->driver_data = channel;
+
 	return 0;

 err_put_carrier:
 	ipack_put_carrier(ipoctal->dev);

-	return err;
+	return res;
+}
+
+static int ipoctal_open(struct tty_struct *tty, struct file *file)
+{
+	struct ipoctal_channel *channel = tty->driver_data;
+
+	return tty_port_open(&channel->tty_port, tty, file);
 }

 static void ipoctal_reset_stats(struct ipoctal_stats *stats)

@@ -264,7 +277,6 @@ static int ipoctal_inst_slot(struct ipoctal *ipoctal, unsigned int bus_nr,
 	int res;
 	int i;
 	struct tty_driver *tty;
-	char name[20];
 	struct ipoctal_channel *channel;
 	struct ipack_region *region;
 	void __iomem *addr;

@@ -355,8 +367,11 @@ static int ipoctal_inst_slot(struct ipoctal *ipoctal, unsigned int bus_nr,
 	/* Fill struct tty_driver with ipoctal data */
 	tty->owner = THIS_MODULE;
 	tty->driver_name = KBUILD_MODNAME;
-	sprintf(name, KBUILD_MODNAME ".%d.%d.", bus_nr, slot);
-	tty->name = name;
+	tty->name = kasprintf(GFP_KERNEL, KBUILD_MODNAME ".%d.%d.", bus_nr, slot);
+	if (!tty->name) {
+		res = -ENOMEM;
+		goto err_put_driver;
+	}
 	tty->major = 0;

 	tty->minor_start = 0;

@@ -372,8 +387,7 @@ static int ipoctal_inst_slot(struct ipoctal *ipoctal, unsigned int bus_nr,
 	res = tty_register_driver(tty);
 	if (res) {
 		dev_err(&ipoctal->dev->dev, "Can't register tty driver.\n");
-		put_tty_driver(tty);
-		return res;
+		goto err_free_name;
 	}

 	/* Save struct tty_driver for use it when uninstalling the device */

@@ -384,7 +398,9 @@ static int ipoctal_inst_slot(struct ipoctal *ipoctal, unsigned int bus_nr,

 		channel = &ipoctal->channel[i];
 		tty_port_init(&channel->tty_port);
-		tty_port_alloc_xmit_buf(&channel->tty_port);
+		res = tty_port_alloc_xmit_buf(&channel->tty_port);
+		if (res)
+			continue;
 		channel->tty_port.ops = &ipoctal_tty_port_ops;

 		ipoctal_reset_stats(&channel->stats);

@@ -392,13 +408,15 @@ static int ipoctal_inst_slot(struct ipoctal *ipoctal, unsigned int bus_nr,
 		spin_lock_init(&channel->lock);
 		channel->pointer_read = 0;
 		channel->pointer_write = 0;
-		tty_dev = tty_port_register_device(&channel->tty_port, tty, i, NULL);
+		tty_dev = tty_port_register_device_attr(&channel->tty_port, tty,
+							i, NULL, channel, NULL);
 		if (IS_ERR(tty_dev)) {
 			dev_err(&ipoctal->dev->dev, "Failed to register tty device.\n");
+			tty_port_free_xmit_buf(&channel->tty_port);
 			tty_port_destroy(&channel->tty_port);
 			continue;
 		}
-		dev_set_drvdata(tty_dev, channel);
+		channel->tty_registered = true;
 	}

 	/*

@@ -410,6 +428,13 @@ static int ipoctal_inst_slot(struct ipoctal *ipoctal, unsigned int bus_nr,
 					   ipoctal_irq_handler, ipoctal);

 	return 0;
+
+err_free_name:
+	kfree(tty->name);
+err_put_driver:
+	put_tty_driver(tty);
+
+	return res;
 }

 static inline int ipoctal_copy_write_buffer(struct ipoctal_channel *channel,

@@ -649,6 +674,7 @@ static void ipoctal_cleanup(struct tty_struct *tty)

 static const struct tty_operations ipoctal_fops = {
 	.ioctl = NULL,
+	.install = ipoctal_install,
 	.open = ipoctal_open,
 	.close = ipoctal_close,
 	.write = ipoctal_write_tty,

@@ -691,12 +717,17 @@ static void __ipoctal_remove(struct ipoctal *ipoctal)

 	for (i = 0; i < NR_CHANNELS; i++) {
 		struct ipoctal_channel *channel = &ipoctal->channel[i];

+		if (!channel->tty_registered)
+			continue;
+
 		tty_unregister_device(ipoctal->tty_drv, i);
 		tty_port_free_xmit_buf(&channel->tty_port);
 		tty_port_destroy(&channel->tty_port);
 	}

 	tty_unregister_driver(ipoctal->tty_drv);
+	kfree(ipoctal->tty_drv->name);
 	put_tty_driver(ipoctal->tty_drv);
 	kfree(ipoctal);
 }

@@ -1140,8 +1140,8 @@ static bool s5p_jpeg_parse_hdr(struct s5p_jpeg_q_data *result,
 			continue;
 		length = 0;
 		switch (c) {
-		/* SOF0: baseline JPEG */
-		case SOF0:
+		/* JPEG_MARKER_SOF0: baseline JPEG */
+		case JPEG_MARKER_SOF0:
 			if (get_word_be(&jpeg_buffer, &word))
 				break;
 			length = (long)word - 2;

@@ -1172,7 +1172,7 @@ static bool s5p_jpeg_parse_hdr(struct s5p_jpeg_q_data *result,
 			notfound = 0;
 			break;

-		case DQT:
+		case JPEG_MARKER_DQT:
 			if (get_word_be(&jpeg_buffer, &word))
 				break;
 			length = (long)word - 2;

@@ -1185,7 +1185,7 @@ static bool s5p_jpeg_parse_hdr(struct s5p_jpeg_q_data *result,
 			skip(&jpeg_buffer, length);
 			break;

-		case DHT:
+		case JPEG_MARKER_DHT:
 			if (get_word_be(&jpeg_buffer, &word))
 				break;
 			length = (long)word - 2;

@@ -1198,15 +1198,15 @@ static bool s5p_jpeg_parse_hdr(struct s5p_jpeg_q_data *result,
 			skip(&jpeg_buffer, length);
 			break;

-		case SOS:
+		case JPEG_MARKER_SOS:
 			sos = jpeg_buffer.curr - 2; /* 0xffda */
 			break;

 		/* skip payload-less markers */
-		case RST ... RST + 7:
-		case SOI:
-		case EOI:
-		case TEM:
+		case JPEG_MARKER_RST ... JPEG_MARKER_RST + 7:
+		case JPEG_MARKER_SOI:
+		case JPEG_MARKER_EOI:
+		case JPEG_MARKER_TEM:
 			break;

 		/* skip uninteresting payload markers */

@@ -37,15 +37,15 @@
 #define EXYNOS3250_IRQ_TIMEOUT		0x10000000

 /* a selection of JPEG markers */
-#define TEM				0x01
-#define SOF0				0xc0
-#define DHT				0xc4
-#define RST				0xd0
-#define SOI				0xd8
-#define EOI				0xd9
-#define	SOS				0xda
-#define DQT				0xdb
-#define DHP				0xde
+#define JPEG_MARKER_TEM			0x01
+#define JPEG_MARKER_SOF0		0xc0
+#define JPEG_MARKER_DHT			0xc4
+#define JPEG_MARKER_RST			0xd0
+#define JPEG_MARKER_SOI			0xd8
+#define JPEG_MARKER_EOI			0xd9
+#define	JPEG_MARKER_SOS			0xda
+#define JPEG_MARKER_DQT			0xdb
+#define JPEG_MARKER_DHP			0xde

 /* Flags that indicate a format can be used for capture/output */
 #define SJPEG_FMT_FLAG_ENC_CAPTURE	(1 << 0)

@@ -187,11 +187,11 @@ struct s5p_jpeg_marker {
 * @fmt:	driver-specific format of this queue
 * @w:		image width
 * @h:		image height
-* @sos:	SOS marker's position relative to the buffer beginning
-* @dht:	DHT markers' positions relative to the buffer beginning
-* @dqt:	DQT markers' positions relative to the buffer beginning
-* @sof:	SOF0 marker's position relative to the buffer beginning
-* @sof_len:	SOF0 marker's payload length (without length field itself)
+* @sos:	JPEG_MARKER_SOS's position relative to the buffer beginning
+* @dht:	JPEG_MARKER_DHT' positions relative to the buffer beginning
+* @dqt:	JPEG_MARKER_DQT' positions relative to the buffer beginning
+* @sof:	JPEG_MARKER_SOF0's position relative to the buffer beginning
+* @sof_len:	JPEG_MARKER_SOF0's payload length (without length field itself)
 * @size:	image buffer size in bytes
 */
struct s5p_jpeg_q_data {

@@ -24,6 +24,7 @@ static const u8 COMMAND_VERSION[] = { 'v' };
 // End transmit and repeat reset command so we exit sump mode
 static const u8 COMMAND_RESET[] = { 0xff, 0xff, 0, 0, 0, 0, 0 };
 static const u8 COMMAND_SMODE_ENTER[] = { 's' };
+static const u8 COMMAND_SMODE_EXIT[] = { 0 };
 static const u8 COMMAND_TXSTART[] = { 0x26, 0x24, 0x25, 0x03 };

 #define REPLY_XMITCOUNT 't'

@@ -309,12 +310,30 @@ static int irtoy_tx(struct rc_dev *rc, uint *txbuf, uint count)
 		buf[i] = cpu_to_be16(v);
 	}

-	buf[count] = cpu_to_be16(0xffff);
+	buf[count] = 0xffff;

 	irtoy->tx_buf = buf;
 	irtoy->tx_len = size;
 	irtoy->emitted = 0;

+	// There is an issue where if the unit is receiving IR while the
+	// first TXSTART command is sent, the device might end up hanging
+	// with its led on. It does not respond to any command when this
+	// happens. To work around this, re-enter sample mode.
+	err = irtoy_command(irtoy, COMMAND_SMODE_EXIT,
+			    sizeof(COMMAND_SMODE_EXIT), STATE_RESET);
+	if (err) {
+		dev_err(irtoy->dev, "exit sample mode: %d\n", err);
+		return err;
+	}
+
+	err = irtoy_command(irtoy, COMMAND_SMODE_ENTER,
+			    sizeof(COMMAND_SMODE_ENTER), STATE_COMMAND);
+	if (err) {
+		dev_err(irtoy->dev, "enter sample mode: %d\n", err);
+		return err;
+	}
+
 	err = irtoy_command(irtoy, COMMAND_TXSTART, sizeof(COMMAND_TXSTART),
 			    STATE_TX);
 	kfree(buf);

@@ -582,6 +582,8 @@ static void renesas_sdhi_reset(struct tmio_mmc_host *host)
 		/* Unknown why but without polling reset status, it will hang */
 		read_poll_timeout(reset_control_status, ret, ret == 0, 1, 100,
 				  false, priv->rstc);
+		/* At least SDHI_VER_GEN2_SDR50 needs manual release of reset */
+		sd_ctrl_write16(host, CTL_RESET_SD, 0x0001);
 		priv->needs_adjust_hs400 = false;
 		renesas_sdhi_set_clock(host, host->clk_cache);
 	} else if (priv->scc_ctl) {

@@ -2775,8 +2775,8 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port)
 	if (err)
 		return err;

-	/* Port Control 2: don't force a good FCS, set the maximum frame size to
-	 * 10240 bytes, disable 802.1q tags checking, don't discard tagged or
+	/* Port Control 2: don't force a good FCS, set the MTU size to
+	 * 10222 bytes, disable 802.1q tags checking, don't discard tagged or
 	 * untagged frames on this port, do a destination address lookup on all
 	 * received packets as usual, disable ARP mirroring and don't send a
 	 * copy of all transmitted/received frames on this port to the CPU.

@@ -2795,7 +2795,7 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port)
 		return err;

 	if (chip->info->ops->port_set_jumbo_size) {
-		err = chip->info->ops->port_set_jumbo_size(chip, port, 10240);
+		err = chip->info->ops->port_set_jumbo_size(chip, port, 10218);
 		if (err)
 			return err;
 	}

@@ -2885,10 +2885,10 @@ static int mv88e6xxx_get_max_mtu(struct dsa_switch *ds, int port)
 	struct mv88e6xxx_chip *chip = ds->priv;

 	if (chip->info->ops->port_set_jumbo_size)
-		return 10240;
+		return 10240 - VLAN_ETH_HLEN - EDSA_HLEN - ETH_FCS_LEN;
 	else if (chip->info->ops->set_max_frame_size)
-		return 1632;
-	return 1522;
+		return 1632 - VLAN_ETH_HLEN - EDSA_HLEN - ETH_FCS_LEN;
+	return 1522 - VLAN_ETH_HLEN - EDSA_HLEN - ETH_FCS_LEN;
 }

 static int mv88e6xxx_change_mtu(struct dsa_switch *ds, int port, int new_mtu)

@@ -2896,6 +2896,9 @@ static int mv88e6xxx_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
 	struct mv88e6xxx_chip *chip = ds->priv;
 	int ret = 0;

+	if (dsa_is_dsa_port(ds, port) || dsa_is_cpu_port(ds, port))
+		new_mtu += EDSA_HLEN;
+
 	mv88e6xxx_reg_lock(chip);
 	if (chip->info->ops->port_set_jumbo_size)
 		ret = chip->info->ops->port_set_jumbo_size(chip, port, new_mtu);

@@ -3657,7 +3660,6 @@ static const struct mv88e6xxx_ops mv88e6161_ops = {
 	.port_set_ucast_flood = mv88e6352_port_set_ucast_flood,
 	.port_set_mcast_flood = mv88e6352_port_set_mcast_flood,
 	.port_set_ether_type = mv88e6351_port_set_ether_type,
-	.port_set_jumbo_size = mv88e6165_port_set_jumbo_size,
 	.port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
 	.port_pause_limit = mv88e6097_port_pause_limit,
 	.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,

@@ -3682,6 +3684,7 @@ static const struct mv88e6xxx_ops mv88e6161_ops = {
 	.avb_ops = &mv88e6165_avb_ops,
 	.ptp_ops = &mv88e6165_ptp_ops,
 	.phylink_validate = mv88e6185_phylink_validate,
+	.set_max_frame_size = mv88e6185_g1_set_max_frame_size,
 };

 static const struct mv88e6xxx_ops mv88e6165_ops = {

@@ -18,6 +18,7 @@
 #include <linux/timecounter.h>
 #include <net/dsa.h>

+#define EDSA_HLEN		8
 #define MV88E6XXX_N_FID		4096

 /* PVT limits for 4-bit port and 5-bit switch */

@@ -232,6 +232,8 @@ int mv88e6185_g1_set_max_frame_size(struct mv88e6xxx_chip *chip, int mtu)
 	u16 val;
 	int err;

+	mtu += ETH_HLEN + ETH_FCS_LEN;
+
 	err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_CTL1, &val);
 	if (err)
 		return err;

@@ -1277,6 +1277,8 @@ int mv88e6165_port_set_jumbo_size(struct mv88e6xxx_chip *chip, int port,
 	u16 reg;
 	int err;

+	size += VLAN_ETH_HLEN + ETH_FCS_LEN;
+
 	err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_CTL2, &reg);
 	if (err)
 		return err;

@@ -541,8 +541,7 @@ static void enetc_mac_config(struct enetc_hw *hw, phy_interface_t phy_mode)

 	if (phy_interface_mode_is_rgmii(phy_mode)) {
 		val = enetc_port_rd(hw, ENETC_PM0_IF_MODE);
-		val &= ~ENETC_PM0_IFM_EN_AUTO;
-		val &= ENETC_PM0_IFM_IFMODE_MASK;
+		val &= ~(ENETC_PM0_IFM_EN_AUTO | ENETC_PM0_IFM_IFMODE_MASK);
 		val |= ENETC_PM0_IFM_IFMODE_GMII | ENETC_PM0_IFM_RG;
 		enetc_port_wr(hw, ENETC_PM0_IF_MODE, val);
 	}

@@ -750,7 +750,6 @@ struct hnae3_tc_info {
 	u8 prio_tc[HNAE3_MAX_USER_PRIO]; /* TC indexed by prio */
 	u16 tqp_count[HNAE3_MAX_TC];
 	u16 tqp_offset[HNAE3_MAX_TC];
-	unsigned long tc_en; /* bitmap of TC enabled */
 	u8 num_tc; /* Total number of enabled TCs */
 	bool mqprio_active;
 };

@@ -620,14 +620,10 @@ static int hns3_nic_set_real_num_queue(struct net_device *netdev)
 			return ret;
 		}

-		for (i = 0; i < HNAE3_MAX_TC; i++) {
-			if (!test_bit(i, &tc_info->tc_en))
-				continue;
-
+		for (i = 0; i < tc_info->num_tc; i++)
 			netdev_set_tc_queue(netdev, i, tc_info->tqp_count[i],
 					    tc_info->tqp_offset[i]);
-		}
 	}

 	ret = netif_set_real_num_tx_queues(netdev, queue_size);
 	if (ret) {

@@ -776,6 +772,11 @@ static int hns3_nic_net_open(struct net_device *netdev)
 	if (hns3_nic_resetting(netdev))
 		return -EBUSY;

+	if (!test_bit(HNS3_NIC_STATE_DOWN, &priv->state)) {
+		netdev_warn(netdev, "net open repeatedly!\n");
+		return 0;
+	}
+
 	netif_carrier_off(netdev);

 	ret = hns3_nic_set_real_num_queue(netdev);

@@ -4825,12 +4826,9 @@ static void hns3_init_tx_ring_tc(struct hns3_nic_priv *priv)
 	struct hnae3_tc_info *tc_info = &kinfo->tc_info;
 	int i;

-	for (i = 0; i < HNAE3_MAX_TC; i++) {
+	for (i = 0; i < tc_info->num_tc; i++) {
 		int j;

-		if (!test_bit(i, &tc_info->tc_en))
-			continue;
-
 		for (j = 0; j < tc_info->tqp_count[i]; j++) {
 			struct hnae3_queue *q;

@@ -312,33 +312,8 @@ out:
 	return ret_val;
 }

-/**
- * hns3_self_test - self test
- * @ndev: net device
- * @eth_test: test cmd
- * @data: test result
- */
-static void hns3_self_test(struct net_device *ndev,
-			   struct ethtool_test *eth_test, u64 *data)
+static void hns3_set_selftest_param(struct hnae3_handle *h, int (*st_param)[2])
 {
-	struct hns3_nic_priv *priv = netdev_priv(ndev);
-	struct hnae3_handle *h = priv->ae_handle;
-	int st_param[HNS3_SELF_TEST_TYPE_NUM][2];
-	bool if_running = netif_running(ndev);
-	int test_index = 0;
-	u32 i;
-
-	if (hns3_nic_resetting(ndev)) {
-		netdev_err(ndev, "dev resetting!");
-		return;
-	}
-
-	/* Only do offline selftest, or pass by default */
-	if (eth_test->flags != ETH_TEST_FL_OFFLINE)
-		return;
-
-	netif_dbg(h, drv, ndev, "self test start");
-
 	st_param[HNAE3_LOOP_APP][0] = HNAE3_LOOP_APP;
 	st_param[HNAE3_LOOP_APP][1] =
 			h->flags & HNAE3_SUPPORT_APP_LOOPBACK;

@@ -355,13 +330,26 @@ static void hns3_self_test(struct net_device *ndev,
 	st_param[HNAE3_LOOP_PHY][0] = HNAE3_LOOP_PHY;
 	st_param[HNAE3_LOOP_PHY][1] =
 			h->flags & HNAE3_SUPPORT_PHY_LOOPBACK;
+}
+
+static void hns3_selftest_prepare(struct net_device *ndev,
+				  bool if_running, int (*st_param)[2])
+{
+	struct hns3_nic_priv *priv = netdev_priv(ndev);
+	struct hnae3_handle *h = priv->ae_handle;
+
+	if (netif_msg_ifdown(h))
+		netdev_info(ndev, "self test start\n");
+
+	hns3_set_selftest_param(h, st_param);

 	if (if_running)
 		ndev->netdev_ops->ndo_stop(ndev);

 #if IS_ENABLED(CONFIG_VLAN_8021Q)
 	/* Disable the vlan filter for selftest does not support it */
-	if (h->ae_algo->ops->enable_vlan_filter)
+	if (h->ae_algo->ops->enable_vlan_filter &&
+	    ndev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
 		h->ae_algo->ops->enable_vlan_filter(h, false);
 #endif

@@ -373,6 +361,36 @@ static void hns3_self_test(struct net_device *ndev,
 		h->ae_algo->ops->halt_autoneg(h, true);

 	set_bit(HNS3_NIC_STATE_TESTING, &priv->state);
+}
+
+static void hns3_selftest_restore(struct net_device *ndev, bool if_running)
+{
+	struct hns3_nic_priv *priv = netdev_priv(ndev);
+	struct hnae3_handle *h = priv->ae_handle;
+
+	clear_bit(HNS3_NIC_STATE_TESTING, &priv->state);
+
+	if (h->ae_algo->ops->halt_autoneg)
+		h->ae_algo->ops->halt_autoneg(h, false);
+
+#if IS_ENABLED(CONFIG_VLAN_8021Q)
+	if (h->ae_algo->ops->enable_vlan_filter &&
+	    ndev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
+		h->ae_algo->ops->enable_vlan_filter(h, true);
+#endif
+
+	if (if_running)
+		ndev->netdev_ops->ndo_open(ndev);
+
+	if (netif_msg_ifdown(h))
+		netdev_info(ndev, "self test end\n");
+}
+
+static void hns3_do_selftest(struct net_device *ndev, int (*st_param)[2],
+			     struct ethtool_test *eth_test, u64 *data)
+{
+	int test_index = 0;
+	u32 i;

 	for (i = 0; i < HNS3_SELF_TEST_TYPE_NUM; i++) {
 		enum hnae3_loop loop_type = (enum hnae3_loop)st_param[i][0];

@@ -391,21 +409,32 @@ static void hns3_self_test(struct net_device *ndev,

 		test_index++;
 	}
+}

-	clear_bit(HNS3_NIC_STATE_TESTING, &priv->state);
+/**
+ * hns3_self_test - self test
+ * @ndev: net device
+ * @eth_test: test cmd
+ * @data: test result
+ */
+static void hns3_self_test(struct net_device *ndev,
+			   struct ethtool_test *eth_test, u64 *data)
+{
+	int st_param[HNS3_SELF_TEST_TYPE_NUM][2];
+	bool if_running = netif_running(ndev);

-	if (h->ae_algo->ops->halt_autoneg)
-		h->ae_algo->ops->halt_autoneg(h, false);
+	if (hns3_nic_resetting(ndev)) {
+		netdev_err(ndev, "dev resetting!");
+		return;
+	}

-#if IS_ENABLED(CONFIG_VLAN_8021Q)
-	if (h->ae_algo->ops->enable_vlan_filter)
-		h->ae_algo->ops->enable_vlan_filter(h, true);
-#endif
+	/* Only do offline selftest, or pass by default */
+	if (eth_test->flags != ETH_TEST_FL_OFFLINE)
+		return;

-	if (if_running)
-		ndev->netdev_ops->ndo_open(ndev);
-
-	netif_dbg(h, drv, ndev, "self test end\n");
+	hns3_selftest_prepare(ndev, if_running, st_param);
+	hns3_do_selftest(ndev, st_param, eth_test, data);
+	hns3_selftest_restore(ndev, if_running);
 }

 static void hns3_update_limit_promisc_mode(struct net_device *netdev,

@@ -472,7 +472,7 @@ err_csq:
 	return ret;
 }

-static int hclge_firmware_compat_config(struct hclge_dev *hdev)
+static int hclge_firmware_compat_config(struct hclge_dev *hdev, bool en)
 {
 	struct hclge_firmware_compat_cmd *req;
 	struct hclge_desc desc;

@@ -480,13 +480,16 @@ static int hclge_firmware_compat_config(struct hclge_dev *hdev, bool en)

 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_IMP_COMPAT_CFG, false);

+	if (en) {
 		req = (struct hclge_firmware_compat_cmd *)desc.data;

 		hnae3_set_bit(compat, HCLGE_LINK_EVENT_REPORT_EN_B, 1);
 		hnae3_set_bit(compat, HCLGE_NCSI_ERROR_REPORT_EN_B, 1);
 		if (hnae3_dev_phy_imp_supported(hdev))
 			hnae3_set_bit(compat, HCLGE_PHY_IMP_EN_B, 1);

 		req->compat = cpu_to_le32(compat);
+	}

 	return hclge_cmd_send(&hdev->hw, &desc, 1);
 }

@@ -543,7 +546,7 @@ int hclge_cmd_init(struct hclge_dev *hdev)
 	/* ask the firmware to enable some features, driver can work without
 	 * it.
 	 */
-	ret = hclge_firmware_compat_config(hdev);
+	ret = hclge_firmware_compat_config(hdev, true);
 	if (ret)
 		dev_warn(&hdev->pdev->dev,
 			 "Firmware compatible features not enabled(%d).\n",

@@ -573,6 +576,8 @@ static void hclge_cmd_uninit_regs(struct hclge_hw *hw)

 void hclge_cmd_uninit(struct hclge_dev *hdev)
 {
+	hclge_firmware_compat_config(hdev, false);
+
 	set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
 	/* wait to ensure that the firmware completes the possible left
 	 * over commands.

@@ -224,6 +224,10 @@ static int hclge_ieee_setets(struct hnae3_handle *h, struct ieee_ets *ets)
 	}

 	hclge_tm_schd_info_update(hdev, num_tc);
+	if (num_tc > 1)
+		hdev->flag |= HCLGE_FLAG_DCB_ENABLE;
+	else
+		hdev->flag &= ~HCLGE_FLAG_DCB_ENABLE;

 	ret = hclge_ieee_ets_to_tm_info(hdev, ets);
 	if (ret)

@@ -285,8 +289,7 @@ static int hclge_ieee_setpfc(struct hnae3_handle *h, struct ieee_pfc *pfc)
 	u8 i, j, pfc_map, *prio_tc;
 	int ret;

-	if (!(hdev->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) ||
-	    hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE)
+	if (!(hdev->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
 		return -EINVAL;

 	if (pfc->pfc_en == hdev->tm_info.pfc_en)

@@ -420,8 +423,6 @@ static int hclge_mqprio_qopt_check(struct hclge_dev *hdev,
 static void hclge_sync_mqprio_qopt(struct hnae3_tc_info *tc_info,
 				   struct tc_mqprio_qopt_offload *mqprio_qopt)
 {
-	int i;
-
 	memset(tc_info, 0, sizeof(*tc_info));
 	tc_info->num_tc = mqprio_qopt->qopt.num_tc;
 	memcpy(tc_info->prio_tc, mqprio_qopt->qopt.prio_tc_map,

@@ -430,9 +431,6 @@ static void hclge_sync_mqprio_qopt(struct hnae3_tc_info *tc_info,
 	       sizeof_field(struct hnae3_tc_info, tqp_count));
 	memcpy(tc_info->tqp_offset, mqprio_qopt->qopt.offset,
 	       sizeof_field(struct hnae3_tc_info, tqp_offset));
-
-	for (i = 0; i < HNAE3_MAX_USER_PRIO; i++)
-		set_bit(tc_info->prio_tc[i], &tc_info->tc_en);
 }

 static int hclge_config_tc(struct hclge_dev *hdev,

@@ -498,12 +496,17 @@ static int hclge_setup_tc(struct hnae3_handle *h,
 	return hclge_notify_init_up(hdev);

 err_out:
+	if (!tc) {
+		dev_warn(&hdev->pdev->dev,
+			 "failed to destroy mqprio, will active after reset, ret = %d\n",
+			 ret);
+	} else {
 		/* roll-back */
 		memcpy(&kinfo->tc_info, &old_tc_info, sizeof(old_tc_info));
 		if (hclge_config_tc(hdev, &kinfo->tc_info))
 			dev_err(&hdev->pdev->dev,
 				"failed to roll back tc configuration\n");
+	}
 	hclge_notify_init_up(hdev);

 	return ret;

@@ -8701,15 +8701,8 @@ int hclge_add_uc_addr_common(struct hclge_vport *vport,
 	}

 	/* check if we just hit the duplicate */
-	if (!ret) {
-		dev_warn(&hdev->pdev->dev, "VF %u mac(%pM) exists\n",
-			 vport->vport_id, addr);
-		return 0;
-	}
-
-	dev_err(&hdev->pdev->dev,
-		"PF failed to add unicast entry(%pM) in the MAC table\n",
-		addr);
+	if (!ret)
+		return -EEXIST;

 	return ret;
 }

@@ -8861,6 +8854,12 @@ static void hclge_sync_vport_mac_list(struct hclge_vport *vport,
 		} else {
 			set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
 				&vport->state);
+
+			/* If one unicast mac address is existing in hardware,
+			 * we need to try whether other unicast mac addresses
+			 * are new addresses that can be added.
+			 */
+			if (ret != -EEXIST)
 				break;
 		}
 	}

@@ -687,12 +687,10 @@ static void hclge_tm_vport_tc_info_update(struct hclge_vport *vport)

 	for (i = 0; i < HNAE3_MAX_TC; i++) {
 		if (hdev->hw_tc_map & BIT(i) && i < kinfo->tc_info.num_tc) {
-			set_bit(i, &kinfo->tc_info.tc_en);
 			kinfo->tc_info.tqp_offset[i] = i * kinfo->rss_size;
 			kinfo->tc_info.tqp_count[i] = kinfo->rss_size;
 		} else {
 			/* Set to default queue if TC is disable */
-			clear_bit(i, &kinfo->tc_info.tc_en);
 			kinfo->tc_info.tqp_offset[i] = 0;
 			kinfo->tc_info.tqp_count[i] = 1;
 		}

@@ -729,14 +727,6 @@ static void hclge_tm_tc_info_init(struct hclge_dev *hdev)
 	for (i = 0; i < HNAE3_MAX_USER_PRIO; i++)
 		hdev->tm_info.prio_tc[i] =
 			(i >= hdev->tm_info.num_tc) ? 0 : i;
-
-	/* DCB is enabled if we have more than 1 TC or pfc_en is
-	 * non-zero.
-	 */
-	if (hdev->tm_info.num_tc > 1 || hdev->tm_info.pfc_en)
-		hdev->flag |= HCLGE_FLAG_DCB_ENABLE;
-	else
-		hdev->flag &= ~HCLGE_FLAG_DCB_ENABLE;
 }

 static void hclge_tm_pg_info_init(struct hclge_dev *hdev)

@@ -767,10 +757,10 @@ static void hclge_tm_pg_info_init(struct hclge_dev *hdev)

 static void hclge_update_fc_mode_by_dcb_flag(struct hclge_dev *hdev)
 {
-	if (!(hdev->flag & HCLGE_FLAG_DCB_ENABLE)) {
+	if (hdev->tm_info.num_tc == 1 && !hdev->tm_info.pfc_en) {
 		if (hdev->fc_mode_last_time == HCLGE_FC_PFC)
 			dev_warn(&hdev->pdev->dev,
-				 "DCB is disable, but last mode is FC_PFC\n");
+				 "Only 1 tc used, but last mode is FC_PFC\n");

 		hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
 	} else if (hdev->tm_info.fc_mode != HCLGE_FC_PFC) {

@@ -796,7 +786,7 @@ static void hclge_update_fc_mode(struct hclge_dev *hdev)
 	}
 }

-static void hclge_pfc_info_init(struct hclge_dev *hdev)
+void hclge_tm_pfc_info_update(struct hclge_dev *hdev)
 {
 	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3)
 		hclge_update_fc_mode(hdev);

@@ -812,7 +802,7 @@ static void hclge_tm_schd_info_init(struct hclge_dev *hdev)

 	hclge_tm_vport_info_update(hdev);

-	hclge_pfc_info_init(hdev);
+	hclge_tm_pfc_info_update(hdev);
 }

 static int hclge_tm_pg_to_pri_map(struct hclge_dev *hdev)

@@ -1558,19 +1548,6 @@ void hclge_tm_schd_info_update(struct hclge_dev *hdev, u8 num_tc)
 	hclge_tm_schd_info_init(hdev);
 }

-void hclge_tm_pfc_info_update(struct hclge_dev *hdev)
-{
-	/* DCB is enabled if we have more than 1 TC or pfc_en is
-	 * non-zero.
-	 */
-	if (hdev->tm_info.num_tc > 1 || hdev->tm_info.pfc_en)
-		hdev->flag |= HCLGE_FLAG_DCB_ENABLE;
-	else
-		hdev->flag &= ~HCLGE_FLAG_DCB_ENABLE;
-
-	hclge_pfc_info_init(hdev);
-}
-
 int hclge_tm_init_hw(struct hclge_dev *hdev, bool init)
 {
 	int ret;

@@ -1616,7 +1593,7 @@ int hclge_tm_vport_map_update(struct hclge_dev *hdev)
 	if (ret)
 		return ret;

-	if (!(hdev->flag & HCLGE_FLAG_DCB_ENABLE))
+	if (hdev->tm_info.num_tc == 1 && !hdev->tm_info.pfc_en)
 		return 0;

 	return hclge_tm_bp_setup(hdev);

drivers/net/ethernet/intel/e100.c

@@ -2437,11 +2437,15 @@ static void e100_get_drvinfo(struct net_device *netdev,
 		sizeof(info->bus_info));
 }

-#define E100_PHY_REGS 0x1C
+#define E100_PHY_REGS 0x1D
 static int e100_get_regs_len(struct net_device *netdev)
 {
 	struct nic *nic = netdev_priv(netdev);
-	return 1 + E100_PHY_REGS + sizeof(nic->mem->dump_buf);
+
+	/* We know the number of registers, and the size of the dump buffer.
+	 * Calculate the total size in bytes.
+	 */
+	return (1 + E100_PHY_REGS) * sizeof(u32) + sizeof(nic->mem->dump_buf);
 }

 static void e100_get_regs(struct net_device *netdev,

@@ -2455,13 +2459,17 @@ static void e100_get_regs(struct net_device *netdev,
 	buff[0] = ioread8(&nic->csr->scb.cmd_hi) << 24 |
 		ioread8(&nic->csr->scb.cmd_lo) << 16 |
 		ioread16(&nic->csr->scb.status);
-	for (i = E100_PHY_REGS; i >= 0; i--)
-		buff[1 + E100_PHY_REGS - i] =
-			mdio_read(netdev, nic->mii.phy_id, i);
+	for (i = 0; i < E100_PHY_REGS; i++)
+		/* Note that we read the registers in reverse order. This
+		 * ordering is the ABI apparently used by ethtool and other
+		 * applications.
+		 */
+		buff[1 + i] = mdio_read(netdev, nic->mii.phy_id,
+					E100_PHY_REGS - 1 - i);
 	memset(nic->mem->dump_buf, 0, sizeof(nic->mem->dump_buf));
 	e100_exec_cb(nic, NULL, e100_dump);
 	msleep(10);
-	memcpy(&buff[2 + E100_PHY_REGS], nic->mem->dump_buf,
+	memcpy(&buff[1 + E100_PHY_REGS], nic->mem->dump_buf,
 		sizeof(nic->mem->dump_buf));
 }
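The e100 fix matters because ethtool sizes its register blob in bytes while the old code counted entries: each of the 1 + E100_PHY_REGS slots is a u32, so the old length undercounted the buffer that the register loop then filled. A compilable sketch of the corrected size arithmetic; the dump-buffer size below is an assumed stand-in, not the driver's real sizeof(nic->mem->dump_buf):

#include <stdint.h>
#include <stdio.h>

#define E100_PHY_REGS 0x1D			/* 29 PHY registers */

int main(void)
{
	size_t dump_buf_sz = 596;	/* assumption: stand-in for the dump buffer size */
	size_t old_len = 1 + E100_PHY_REGS + dump_buf_sz;	/* counted entries, not bytes */
	size_t new_len = (1 + E100_PHY_REGS) * sizeof(uint32_t) + dump_buf_sz;

	printf("old: %zu bytes, new: %zu bytes\n", old_len, new_len);
	return 0;
}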
drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c

@@ -3204,7 +3204,7 @@ static unsigned int ixgbe_max_channels(struct ixgbe_adapter *adapter)
 		max_combined = ixgbe_max_rss_indices(adapter);
 	}

-	return max_combined;
+	return min_t(int, max_combined, num_online_cpus());
 }

 static void ixgbe_get_channels(struct net_device *dev,
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c

@@ -10112,6 +10112,7 @@ static int ixgbe_xdp_setup(struct net_device *dev, struct bpf_prog *prog)
 	struct ixgbe_adapter *adapter = netdev_priv(dev);
 	struct bpf_prog *old_prog;
 	bool need_reset;
+	int num_queues;

 	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
 		return -EINVAL;

@@ -10161,11 +10162,14 @@ static int ixgbe_xdp_setup(struct net_device *dev, struct bpf_prog *prog)
 	/* Kick start the NAPI context if there is an AF_XDP socket open
 	 * on that queue id. This is so that receiving will start.
 	 */
-	if (need_reset && prog)
-		for (i = 0; i < adapter->num_rx_queues; i++)
+	if (need_reset && prog) {
+		num_queues = min_t(int, adapter->num_rx_queues,
+				   adapter->num_xdp_queues);
+		for (i = 0; i < num_queues; i++)
 			if (adapter->xdp_ring[i]->xsk_pool)
 				(void)ixgbe_xsk_wakeup(adapter->netdev, i,
 						       XDP_WAKEUP_RX);
+	}

 	return 0;
 }
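The ixgbe hunk bounds the AF_XDP wakeup loop: an adapter can have more RX queues than XDP rings, so indexing xdp_ring[] with num_rx_queues could walk off the end of the smaller array. The guard is the usual clamp-before-iterate pattern, sketched here with hypothetical queue counts:

#include <stdio.h>

static int min_int(int a, int b)
{
	return a < b ? a : b;
}

int main(void)
{
	int num_rx_queues = 8, num_xdp_queues = 4;	/* assumed example counts */
	/* iterate only over indices valid for the smaller array */
	int num_queues = min_int(num_rx_queues, num_xdp_queues);
	int i;

	for (i = 0; i < num_queues; i++)
		printf("wake queue %d\n", i);
	return 0;
}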
drivers/net/ethernet/mellanox/mlx4/en_netdev.c

@@ -1272,7 +1272,6 @@ static void mlx4_en_do_set_rx_mode(struct work_struct *work)
 	if (!netif_carrier_ok(dev)) {
 		if (!mlx4_en_QUERY_PORT(mdev, priv->port)) {
 			if (priv->port_state.link_state) {
-				priv->last_link_state = MLX4_DEV_EVENT_PORT_UP;
 				netif_carrier_on(dev);
 				en_dbg(LINK, priv, "Link Up\n");
 			}

@@ -1560,26 +1559,36 @@ static void mlx4_en_service_task(struct work_struct *work)
 	mutex_unlock(&mdev->state_lock);
 }

-static void mlx4_en_linkstate(struct work_struct *work)
+static void mlx4_en_linkstate(struct mlx4_en_priv *priv)
+{
+	struct mlx4_en_port_state *port_state = &priv->port_state;
+	struct mlx4_en_dev *mdev = priv->mdev;
+	struct net_device *dev = priv->dev;
+	bool up;
+
+	if (mlx4_en_QUERY_PORT(mdev, priv->port))
+		port_state->link_state = MLX4_PORT_STATE_DEV_EVENT_PORT_DOWN;
+
+	up = port_state->link_state == MLX4_PORT_STATE_DEV_EVENT_PORT_UP;
+	if (up == netif_carrier_ok(dev))
+		netif_carrier_event(dev);
+	if (!up) {
+		en_info(priv, "Link Down\n");
+		netif_carrier_off(dev);
+	} else {
+		en_info(priv, "Link Up\n");
+		netif_carrier_on(dev);
+	}
+}
+
+static void mlx4_en_linkstate_work(struct work_struct *work)
 {
 	struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
 						 linkstate_task);
 	struct mlx4_en_dev *mdev = priv->mdev;
-	int linkstate = priv->link_state;

 	mutex_lock(&mdev->state_lock);
-	/* If observable port state changed set carrier state and
-	 * report to system log */
-	if (priv->last_link_state != linkstate) {
-		if (linkstate == MLX4_DEV_EVENT_PORT_DOWN) {
-			en_info(priv, "Link Down\n");
-			netif_carrier_off(priv->dev);
-		} else {
-			en_info(priv, "Link Up\n");
-			netif_carrier_on(priv->dev);
-		}
-	}
-	priv->last_link_state = linkstate;
+	mlx4_en_linkstate(priv);
 	mutex_unlock(&mdev->state_lock);
 }

@@ -2082,9 +2091,11 @@ static int mlx4_en_open(struct net_device *dev)
 	mlx4_en_clear_stats(dev);

 	err = mlx4_en_start_port(dev);
-	if (err)
+	if (err) {
 		en_err(priv, "Failed starting port:%d\n", priv->port);
-
+		goto out;
+	}
+	mlx4_en_linkstate(priv);
 out:
 	mutex_unlock(&mdev->state_lock);
 	return err;

@@ -3171,7 +3182,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
 	spin_lock_init(&priv->stats_lock);
 	INIT_WORK(&priv->rx_mode_task, mlx4_en_do_set_rx_mode);
 	INIT_WORK(&priv->restart_task, mlx4_en_restart);
-	INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate);
+	INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate_work);
 	INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats);
 	INIT_DELAYED_WORK(&priv->service_task, mlx4_en_service_task);
 #ifdef CONFIG_RFS_ACCEL
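The mlx4 rework splits the old work handler in two: a plain helper that queries the port and sets the carrier state synchronously, and a thin work_struct wrapper that takes the lock and delegates. That lets mlx4_en_open() refresh the operstate immediately instead of waiting for the next link event. The container_of() wrapper idiom behind it, in a userspace-compilable sketch with hypothetical types (offsetof spelled out where the kernel would use container_of):

#include <stddef.h>
#include <stdio.h>

struct work_struct { int pending; };

struct en_priv {
	int port;
	struct work_struct linkstate_task;
};

/* core logic, callable directly (e.g. from an open() path) */
static void linkstate(struct en_priv *priv)
{
	printf("refresh carrier for port %d\n", priv->port);
}

/* deferred-context wrapper: recover the container, then call the core */
static void linkstate_work(struct work_struct *work)
{
	struct en_priv *priv = (struct en_priv *)
		((char *)work - offsetof(struct en_priv, linkstate_task));

	linkstate(priv);
}

int main(void)
{
	struct en_priv priv = { .port = 1 };

	linkstate(&priv);			/* direct, synchronous call */
	linkstate_work(&priv.linkstate_task);	/* as the work queue would */
	return 0;
}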
drivers/net/ethernet/mellanox/mlx4/mlx4_en.h

@@ -552,7 +552,6 @@ struct mlx4_en_priv {

 	struct mlx4_hwq_resources res;
 	int link_state;
-	int last_link_state;
 	bool port_up;
 	int port;
 	int registered;
drivers/net/ethernet/micrel/Makefile

@@ -4,8 +4,6 @@
 #

 obj-$(CONFIG_KS8842) += ks8842.o
-obj-$(CONFIG_KS8851) += ks8851.o
-ks8851-objs = ks8851_common.o ks8851_spi.o
-obj-$(CONFIG_KS8851_MLL) += ks8851_mll.o
-ks8851_mll-objs = ks8851_common.o ks8851_par.o
+obj-$(CONFIG_KS8851) += ks8851_common.o ks8851_spi.o
+obj-$(CONFIG_KS8851_MLL) += ks8851_common.o ks8851_par.o
 obj-$(CONFIG_KSZ884X_PCI) += ksz884x.o
drivers/net/ethernet/micrel/ks8851_common.c

@@ -1057,6 +1057,7 @@ int ks8851_suspend(struct device *dev)

 	return 0;
 }
+EXPORT_SYMBOL_GPL(ks8851_suspend);

 int ks8851_resume(struct device *dev)
 {

@@ -1070,6 +1071,7 @@ int ks8851_resume(struct device *dev)

 	return 0;
 }
+EXPORT_SYMBOL_GPL(ks8851_resume);
 #endif

 static int ks8851_register_mdiobus(struct ks8851_net *ks, struct device *dev)

@@ -1243,6 +1245,7 @@ err_reg:
 err_reg_io:
 	return ret;
 }
+EXPORT_SYMBOL_GPL(ks8851_probe_common);

 int ks8851_remove_common(struct device *dev)
 {

@@ -1261,3 +1264,8 @@ int ks8851_remove_common(struct device *dev)

 	return 0;
 }
+EXPORT_SYMBOL_GPL(ks8851_remove_common);
+
+MODULE_DESCRIPTION("KS8851 Network driver");
+MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
+MODULE_LICENSE("GPL");
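The Makefile change stops linking ks8851_common.o into two modules at once, which duplicated the common code when both the SPI and parallel-bus drivers were loaded, and instead builds it once so both front ends share it. That is why ks8851_suspend(), ks8851_resume(), ks8851_probe_common() and ks8851_remove_common() now need EXPORT_SYMBOL_GPL(), and why the common file gains its own MODULE_* tags. A hypothetical miniature of the export pattern (names are illustrative, not the driver's):

/* foo_core.c: once a shared object becomes a module of its own, every
 * helper the front-end modules call must be exported.
 */
#include <linux/module.h>

int foo_core_reset(void)
{
	return 0;	/* shared helper body */
}
EXPORT_SYMBOL_GPL(foo_core_reset);

MODULE_DESCRIPTION("Shared core for the foo SPI/parallel drivers");
MODULE_LICENSE("GPL");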
drivers/net/ethernet/pensando/ionic/ionic_stats.c

@@ -380,15 +380,6 @@ static void ionic_sw_stats_get_txq_values(struct ionic_lif *lif, u64 **buf,
 					  &ionic_dbg_intr_stats_desc[i]);
 		(*buf)++;
 	}
-	for (i = 0; i < IONIC_NUM_DBG_NAPI_STATS; i++) {
-		**buf = IONIC_READ_STAT64(&txqcq->napi_stats,
-					  &ionic_dbg_napi_stats_desc[i]);
-		(*buf)++;
-	}
-	for (i = 0; i < IONIC_MAX_NUM_NAPI_CNTR; i++) {
-		**buf = txqcq->napi_stats.work_done_cntr[i];
-		(*buf)++;
-	}
 	for (i = 0; i < IONIC_MAX_NUM_SG_CNTR; i++) {
 		**buf = txstats->sg_cntr[i];
 		(*buf)++;
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c

@@ -486,6 +486,10 @@ bool stmmac_eee_init(struct stmmac_priv *priv)
 		timer_setup(&priv->eee_ctrl_timer, stmmac_eee_ctrl_timer, 0);
 		stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS,
 				     eee_tw_timer);
+		if (priv->hw->xpcs)
+			xpcs_config_eee(priv->hw->xpcs,
+					priv->plat->mult_fact_100ns,
+					true);
 	}

 	if (priv->plat->has_gmac4 && priv->tx_lpi_timer <= STMMAC_ET_MAX) {
drivers/net/mhi/net.c

@@ -337,7 +337,7 @@ static int mhi_net_newlink(void *ctxt, struct net_device *ndev, u32 if_id,
 	/* Start MHI channels */
 	err = mhi_prepare_for_transfer(mhi_dev);
 	if (err)
-		goto out_err;
+		return err;

 	/* Number of transfer descriptors determines size of the queue */
 	mhi_netdev->rx_queue_sz = mhi_get_free_desc_count(mhi_dev, DMA_FROM_DEVICE);

@@ -347,7 +347,7 @@ static int mhi_net_newlink(void *ctxt, struct net_device *ndev, u32 if_id,
 	else
 		err = register_netdev(ndev);
 	if (err)
-		goto out_err;
+		return err;

 	if (mhi_netdev->proto) {
 		err = mhi_netdev->proto->init(mhi_netdev);

@@ -359,8 +359,6 @@ static int mhi_net_newlink(void *ctxt, struct net_device *ndev, u32 if_id,

 out_err_proto:
 	unregister_netdevice(ndev);
-out_err:
-	free_netdev(ndev);
 	return err;
 }
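The mhi_net change is about ownership on error paths: before the netdev is registered, the caller of the newlink hook still owns ndev and frees it on failure, so freeing it locally as well produced a double free. The rule, as a small sketch with hypothetical names:

#include <stdlib.h>

struct ndev { int id; };

/* On failure before ownership transfers, just report the error; the
 * caller allocated ndev and remains responsible for freeing it.
 */
static int newlink(struct ndev *ndev)
{
	int err = -1;	/* pretend channel setup failed */

	if (err)
		return err;	/* no free here */
	return 0;
}

int main(void)
{
	struct ndev *ndev = malloc(sizeof(*ndev));

	if (ndev && newlink(ndev))
		free(ndev);	/* exactly one owner frees */
	return 0;
}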
drivers/net/phy/bcm7xxx.c

@@ -27,7 +27,12 @@
 #define MII_BCM7XXX_SHD_2_ADDR_CTRL	0xe
 #define MII_BCM7XXX_SHD_2_CTRL_STAT	0xf
 #define MII_BCM7XXX_SHD_2_BIAS_TRIM	0x1a
+#define MII_BCM7XXX_SHD_3_PCS_CTRL	0x0
+#define MII_BCM7XXX_SHD_3_PCS_STATUS	0x1
+#define MII_BCM7XXX_SHD_3_EEE_CAP	0x2
 #define MII_BCM7XXX_SHD_3_AN_EEE_ADV	0x3
+#define MII_BCM7XXX_SHD_3_EEE_LP	0x4
+#define MII_BCM7XXX_SHD_3_EEE_WK_ERR	0x5
 #define MII_BCM7XXX_SHD_3_PCS_CTRL_2	0x6
 #define MII_BCM7XXX_PCS_CTRL_2_DEF	0x4400
 #define MII_BCM7XXX_SHD_3_AN_STAT	0xb

@@ -216,25 +221,37 @@ static int bcm7xxx_28nm_resume(struct phy_device *phydev)
 	return genphy_config_aneg(phydev);
 }

-static int phy_set_clr_bits(struct phy_device *dev, int location,
+static int __phy_set_clr_bits(struct phy_device *dev, int location,
 			    int set_mask, int clr_mask)
 {
 	int v, ret;

-	v = phy_read(dev, location);
+	v = __phy_read(dev, location);
 	if (v < 0)
 		return v;

 	v &= ~clr_mask;
 	v |= set_mask;

-	ret = phy_write(dev, location, v);
+	ret = __phy_write(dev, location, v);
 	if (ret < 0)
 		return ret;

 	return v;
 }

+static int phy_set_clr_bits(struct phy_device *dev, int location,
+			    int set_mask, int clr_mask)
+{
+	int ret;
+
+	mutex_lock(&dev->mdio.bus->mdio_lock);
+	ret = __phy_set_clr_bits(dev, location, set_mask, clr_mask);
+	mutex_unlock(&dev->mdio.bus->mdio_lock);
+
+	return ret;
+}
+
 static int bcm7xxx_28nm_ephy_01_afe_config_init(struct phy_device *phydev)
 {
 	int ret;

@@ -398,6 +415,93 @@ static int bcm7xxx_28nm_ephy_config_init(struct phy_device *phydev)
 	return bcm7xxx_28nm_ephy_apd_enable(phydev);
 }

+#define MII_BCM7XXX_REG_INVALID	0xff
+
+static u8 bcm7xxx_28nm_ephy_regnum_to_shd(u16 regnum)
+{
+	switch (regnum) {
+	case MDIO_CTRL1:
+		return MII_BCM7XXX_SHD_3_PCS_CTRL;
+	case MDIO_STAT1:
+		return MII_BCM7XXX_SHD_3_PCS_STATUS;
+	case MDIO_PCS_EEE_ABLE:
+		return MII_BCM7XXX_SHD_3_EEE_CAP;
+	case MDIO_AN_EEE_ADV:
+		return MII_BCM7XXX_SHD_3_AN_EEE_ADV;
+	case MDIO_AN_EEE_LPABLE:
+		return MII_BCM7XXX_SHD_3_EEE_LP;
+	case MDIO_PCS_EEE_WK_ERR:
+		return MII_BCM7XXX_SHD_3_EEE_WK_ERR;
+	default:
+		return MII_BCM7XXX_REG_INVALID;
+	}
+}
+
+static bool bcm7xxx_28nm_ephy_dev_valid(int devnum)
+{
+	return devnum == MDIO_MMD_AN || devnum == MDIO_MMD_PCS;
+}
+
+static int bcm7xxx_28nm_ephy_read_mmd(struct phy_device *phydev,
+				      int devnum, u16 regnum)
+{
+	u8 shd = bcm7xxx_28nm_ephy_regnum_to_shd(regnum);
+	int ret;
+
+	if (!bcm7xxx_28nm_ephy_dev_valid(devnum) ||
+	    shd == MII_BCM7XXX_REG_INVALID)
+		return -EOPNOTSUPP;
+
+	/* set shadow mode 2 */
+	ret = __phy_set_clr_bits(phydev, MII_BCM7XXX_TEST,
+				 MII_BCM7XXX_SHD_MODE_2, 0);
+	if (ret < 0)
+		return ret;
+
+	/* Access the desired shadow register address */
+	ret = __phy_write(phydev, MII_BCM7XXX_SHD_2_ADDR_CTRL, shd);
+	if (ret < 0)
+		goto reset_shadow_mode;
+
+	ret = __phy_read(phydev, MII_BCM7XXX_SHD_2_CTRL_STAT);
+
+reset_shadow_mode:
+	/* reset shadow mode 2 */
+	__phy_set_clr_bits(phydev, MII_BCM7XXX_TEST, 0,
+			   MII_BCM7XXX_SHD_MODE_2);
+	return ret;
+}
+
+static int bcm7xxx_28nm_ephy_write_mmd(struct phy_device *phydev,
+				       int devnum, u16 regnum, u16 val)
+{
+	u8 shd = bcm7xxx_28nm_ephy_regnum_to_shd(regnum);
+	int ret;
+
+	if (!bcm7xxx_28nm_ephy_dev_valid(devnum) ||
+	    shd == MII_BCM7XXX_REG_INVALID)
+		return -EOPNOTSUPP;
+
+	/* set shadow mode 2 */
+	ret = __phy_set_clr_bits(phydev, MII_BCM7XXX_TEST,
+				 MII_BCM7XXX_SHD_MODE_2, 0);
+	if (ret < 0)
+		return ret;
+
+	/* Access the desired shadow register address */
+	ret = __phy_write(phydev, MII_BCM7XXX_SHD_2_ADDR_CTRL, shd);
+	if (ret < 0)
+		goto reset_shadow_mode;
+
+	/* Write the desired value in the shadow register */
+	__phy_write(phydev, MII_BCM7XXX_SHD_2_CTRL_STAT, val);
+
+reset_shadow_mode:
+	/* reset shadow mode 2 */
+	return __phy_set_clr_bits(phydev, MII_BCM7XXX_TEST, 0,
+				  MII_BCM7XXX_SHD_MODE_2);
+}
+
 static int bcm7xxx_28nm_ephy_resume(struct phy_device *phydev)
 {
 	int ret;

@@ -595,6 +699,8 @@ static void bcm7xxx_28nm_remove(struct phy_device *phydev)
 	.get_stats = bcm7xxx_28nm_get_phy_stats, \
 	.probe = bcm7xxx_28nm_probe, \
 	.remove = bcm7xxx_28nm_remove, \
+	.read_mmd = bcm7xxx_28nm_ephy_read_mmd, \
+	.write_mmd = bcm7xxx_28nm_ephy_write_mmd, \
 }

 #define BCM7XXX_40NM_EPHY(_oui, _name) \
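The bcm7xxx read_mmd/write_mmd callbacks run with the MDIO bus lock already held, so they must use the unlocked __phy_* accessors; hence the split above into __phy_set_clr_bits(), whose caller holds the lock, and a locking phy_set_clr_bits() wrapper for everyone else. The locked/unlocked split, sketched with a userspace mutex and hypothetical register state:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t mdio_lock = PTHREAD_MUTEX_INITIALIZER;
static int reg_val;	/* stand-in for a PHY register */

/* double-underscore variant: caller must already hold mdio_lock */
static int __set_clr_bits(int set_mask, int clr_mask)
{
	reg_val = (reg_val & ~clr_mask) | set_mask;
	return reg_val;
}

/* public variant: takes the lock, then delegates */
static int set_clr_bits(int set_mask, int clr_mask)
{
	int ret;

	pthread_mutex_lock(&mdio_lock);
	ret = __set_clr_bits(set_mask, clr_mask);
	pthread_mutex_unlock(&mdio_lock);
	return ret;
}

int main(void)
{
	printf("reg=0x%x\n", set_clr_bits(0x3, 0x1));
	return 0;
}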
drivers/net/phy/mdio_bus.c

@@ -525,6 +525,10 @@ int __mdiobus_register(struct mii_bus *bus, struct module *owner)
 	    NULL == bus->read || NULL == bus->write)
 		return -EINVAL;

+	if (bus->parent && bus->parent->of_node)
+		bus->parent->of_node->fwnode.flags |=
+					FWNODE_FLAG_NEEDS_CHILD_BOUND_ON_ADD;
+
 	BUG_ON(bus->state != MDIOBUS_ALLOCATED &&
 	       bus->state != MDIOBUS_UNREGISTERED);
drivers/net/usb/hso.c

@@ -2353,7 +2353,7 @@ static int remove_net_device(struct hso_device *hso_dev)
 }

 /* Frees our network device */
-static void hso_free_net_device(struct hso_device *hso_dev, bool bailout)
+static void hso_free_net_device(struct hso_device *hso_dev)
 {
 	int i;
 	struct hso_net *hso_net = dev2net(hso_dev);

@@ -2376,7 +2376,7 @@ static void hso_free_net_device(struct hso_device *hso_dev, bool bailout)
 	kfree(hso_net->mux_bulk_tx_buf);
 	hso_net->mux_bulk_tx_buf = NULL;

-	if (hso_net->net && !bailout)
+	if (hso_net->net)
 		free_netdev(hso_net->net);

 	kfree(hso_dev);

@@ -3136,7 +3136,7 @@ static void hso_free_interface(struct usb_interface *interface)
 			rfkill_unregister(rfk);
 			rfkill_destroy(rfk);
 		}
-		hso_free_net_device(network_table[i], false);
+		hso_free_net_device(network_table[i]);
 	}
 }
drivers/net/usb/smsc95xx.c

@@ -1178,7 +1178,10 @@ static void smsc95xx_unbind(struct usbnet *dev, struct usb_interface *intf)

 static void smsc95xx_handle_link_change(struct net_device *net)
 {
+	struct usbnet *dev = netdev_priv(net);
+
 	phy_print_status(net->phydev);
+	usbnet_defer_kevent(dev, EVENT_LINK_CHANGE);
 }

 static int smsc95xx_start_phy(struct usbnet *dev)
drivers/net/wireless/mac80211_hwsim.c

@@ -1867,7 +1867,7 @@ mac80211_hwsim_beacon(struct hrtimer *timer)
 		bcn_int -= data->bcn_delta;
 		data->bcn_delta = 0;
 	}
-	hrtimer_forward(&data->beacon_timer, hrtimer_get_expires(timer),
+	hrtimer_forward_now(&data->beacon_timer,
 			ns_to_ktime(bcn_int * NSEC_PER_USEC));
 	return HRTIMER_RESTART;
 }
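hrtimer_forward() advances a timer relative to a caller-supplied base; the old hwsim code passed the timer's previous expiry, so after a late wakeup the new expiry could still lie in the past and the beacon would re-arm in a tight loop. hrtimer_forward_now() bases the advance on the current time instead. The catch-up arithmetic, modelled in plain C with made-up time values:

#include <stdio.h>

/* rough model of hrtimer_forward(): push expiry past 'base' in whole periods */
static long forward(long expiry, long base, long period)
{
	while (expiry <= base)
		expiry += period;
	return expiry;
}

int main(void)
{
	long period = 100;
	long stale_expiry = 1000, now = 1550;

	/* forwarding against the stale expiry yields 1100, still in the
	 * past; forwarding against 'now' lands safely at 1600. */
	printf("vs stale: %ld, vs now: %ld\n",
	       forward(stale_expiry, stale_expiry, period),
	       forward(stale_expiry, now, period));
	return 0;
}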
drivers/nvme/host/core.c

@@ -980,6 +980,7 @@ EXPORT_SYMBOL_GPL(nvme_cleanup_cmd);
 blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req)
 {
 	struct nvme_command *cmd = nvme_req(req)->cmd;
+	struct nvme_ctrl *ctrl = nvme_req(req)->ctrl;
 	blk_status_t ret = BLK_STS_OK;

 	if (!(req->rq_flags & RQF_DONTPREP)) {

@@ -1028,6 +1029,7 @@ blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req)
 		return BLK_STS_IOERR;
 	}

+	if (!(ctrl->quirks & NVME_QUIRK_SKIP_CID_GEN))
 		nvme_req(req)->genctr++;
 	cmd->common.command_id = nvme_cid(req);
 	trace_nvme_setup_cmd(req, cmd);
drivers/nvme/host/nvme.h

@@ -149,6 +149,12 @@ enum nvme_quirks {
 	 * 48 bits.
 	 */
 	NVME_QUIRK_DMA_ADDRESS_BITS_48		= (1 << 16),
+
+	/*
+	 * The controller requires the command_id value be limited, so skip
+	 * encoding the generation sequence number.
+	 */
+	NVME_QUIRK_SKIP_CID_GEN			= (1 << 17),
 };

 /*
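For background on the quirk above: the driver hides a small generation counter in the upper bits of each command_id so stale or spurious completions can be detected, and Apple controllers apparently require command_id values to stay small, so NVME_QUIRK_SKIP_CID_GEN leaves those bits zero. A sketch of the encoding; the 12-bit tag split below is an assumption for illustration, not a quote of the driver's nvme_cid():

#include <stdint.h>
#include <stdio.h>

#define TAG_BITS 12	/* assumed split: low bits tag, high bits genctr */

static uint16_t make_cid(uint16_t tag, uint16_t genctr, int skip_cid_gen)
{
	if (skip_cid_gen)	/* NVME_QUIRK_SKIP_CID_GEN behaviour */
		return tag;
	return (uint16_t)(genctr << TAG_BITS) | tag;
}

int main(void)
{
	printf("normal: 0x%04x, quirked: 0x%04x\n",
	       make_cid(0x012, 0x3, 0), make_cid(0x012, 0x3, 1));
	return 0;
}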
drivers/nvme/host/pci.c

@@ -3282,7 +3282,8 @@ static const struct pci_device_id nvme_id_table[] = {
 	{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2005),
 		.driver_data = NVME_QUIRK_SINGLE_VECTOR |
 				NVME_QUIRK_128_BYTES_SQES |
-				NVME_QUIRK_SHARED_TAGS },
+				NVME_QUIRK_SHARED_TAGS |
+				NVME_QUIRK_SKIP_CID_GEN },
 	{ PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
 	{ 0, }
drivers/pinctrl/qcom/pinctrl-spmi-gpio.c

@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2014, 2016-2021 The Linux Foundation. All rights reserved.
 */

 #include <linux/gpio/driver.h>

@@ -14,6 +14,7 @@
 #include <linux/platform_device.h>
 #include <linux/regmap.h>
 #include <linux/slab.h>
+#include <linux/spmi.h>
 #include <linux/types.h>

 #include <dt-bindings/pinctrl/qcom,pmic-gpio.h>

@@ -171,6 +172,8 @@ struct pmic_gpio_state {
 	struct pinctrl_dev *ctrl;
 	struct gpio_chip chip;
 	struct irq_chip irq;
+	u8 usid;
+	u8 pid_base;
 };

 static const struct pinconf_generic_params pmic_gpio_bindings[] = {

@@ -949,12 +952,36 @@ static int pmic_gpio_child_to_parent_hwirq(struct gpio_chip *chip,
 					   unsigned int *parent_hwirq,
 					   unsigned int *parent_type)
 {
-	*parent_hwirq = child_hwirq + 0xc0;
+	struct pmic_gpio_state *state = gpiochip_get_data(chip);
+
+	*parent_hwirq = child_hwirq + state->pid_base;
 	*parent_type = child_type;

 	return 0;
 }

+static void *pmic_gpio_populate_parent_fwspec(struct gpio_chip *chip,
+					      unsigned int parent_hwirq,
+					      unsigned int parent_type)
+{
+	struct pmic_gpio_state *state = gpiochip_get_data(chip);
+	struct irq_fwspec *fwspec;
+
+	fwspec = kzalloc(sizeof(*fwspec), GFP_KERNEL);
+	if (!fwspec)
+		return NULL;
+
+	fwspec->fwnode = chip->irq.parent_domain->fwnode;
+
+	fwspec->param_count = 4;
+	fwspec->param[0] = state->usid;
+	fwspec->param[1] = parent_hwirq;
+	/* param[2] must be left as 0 */
+	fwspec->param[3] = parent_type;
+
+	return fwspec;
+}
+
 static int pmic_gpio_probe(struct platform_device *pdev)
 {
 	struct irq_domain *parent_domain;

@@ -965,6 +992,7 @@ static int pmic_gpio_probe(struct platform_device *pdev)
 	struct pmic_gpio_pad *pad, *pads;
 	struct pmic_gpio_state *state;
 	struct gpio_irq_chip *girq;
+	const struct spmi_device *parent_spmi_dev;
 	int ret, npins, i;
 	u32 reg;

@@ -984,6 +1012,9 @@ static int pmic_gpio_probe(struct platform_device *pdev)
 	state->dev = &pdev->dev;
 	state->map = dev_get_regmap(dev->parent, NULL);
+	parent_spmi_dev = to_spmi_device(dev->parent);
+	state->usid = parent_spmi_dev->usid;
+	state->pid_base = reg >> 8;

 	pindesc = devm_kcalloc(dev, npins, sizeof(*pindesc), GFP_KERNEL);
 	if (!pindesc)

@@ -1059,7 +1090,7 @@ static int pmic_gpio_probe(struct platform_device *pdev)
 	girq->fwnode = of_node_to_fwnode(state->dev->of_node);
 	girq->parent_domain = parent_domain;
 	girq->child_to_parent_hwirq = pmic_gpio_child_to_parent_hwirq;
-	girq->populate_parent_alloc_arg = gpiochip_populate_parent_fwspec_fourcell;
+	girq->populate_parent_alloc_arg = pmic_gpio_populate_parent_fwspec;
 	girq->child_offset_to_irq = pmic_gpio_child_offset_to_irq;
 	girq->child_irq_domain_ops.translate = pmic_gpio_domain_translate;
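The pinctrl-spmi-gpio hunks replace two hard-coded assumptions, USID 0 and a 0xc0 peripheral base, with values read from the SPMI parent: the DT reg property carries the peripheral ID in its upper byte, so pid_base = reg >> 8, and the 4-cell parent irqspec becomes [usid, pid_base + gpio index, 0, trigger type]. A small sketch of that arithmetic with assumed example values:

#include <stdio.h>

int main(void)
{
	unsigned int reg = 0xc000;	/* assumed DT "reg" value of the GPIO block */
	unsigned int usid = 2;		/* assumed SPMI slave id of the PMIC */
	unsigned int pid_base = reg >> 8;	/* peripheral id base, 0xc0 here */
	unsigned int child_hwirq = 3;	/* GPIO4, zero-based index 3 */
	unsigned int type = 2;		/* e.g. falling-edge trigger */
	unsigned int param[4] = { usid, pid_base + child_hwirq, 0, type };

	printf("parent fwspec: { %u, 0x%x, %u, %u }\n",
	       param[0], param[1], param[2], param[3]);
	return 0;
}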
Some files were not shown because too many files have changed in this diff.