mirror of git://git.yoctoproject.org/linux-yocto.git (synced 2025-10-22 23:13:01 +02:00)

commit 12e7dd6fe5
Merge branch 'v5.13/standard/base' into v5.13/standard/preempt-rt/x86
@@ -20,10 +20,10 @@ On x86:

 - vcpu->mutex is taken outside kvm->arch.hyperv.hv_lock

-- kvm->arch.mmu_lock is an rwlock.  kvm->arch.tdp_mmu_pages_lock is
-  taken inside kvm->arch.mmu_lock, and cannot be taken without already
-  holding kvm->arch.mmu_lock (typically with ``read_lock``, otherwise
-  there's no need to take kvm->arch.tdp_mmu_pages_lock at all).
+- kvm->arch.mmu_lock is an rwlock.  kvm->arch.tdp_mmu_pages_lock and
+  kvm->arch.mmu_unsync_pages_lock are taken inside kvm->arch.mmu_lock, and
+  cannot be taken without already holding kvm->arch.mmu_lock (typically with
+  ``read_lock`` for the TDP MMU, thus the need for additional spinlocks).

 Everything else is a leaf: no other lock is taken inside the critical
 sections.
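The nesting rule documented above translates directly into a fixed acquisition order. A minimal sketch of that order, using stand-in lock variables rather than the real kvm->arch fields (names here are illustrative only):

    #include <linux/spinlock.h>

    /* Stand-ins for the locks named in the documentation above. */
    static DEFINE_RWLOCK(mmu_lock);             /* kvm->arch.mmu_lock */
    static DEFINE_SPINLOCK(tdp_mmu_pages_lock); /* kvm->arch.tdp_mmu_pages_lock */

    static void touch_tdp_mmu_pages(void)
    {
        /* The rwlock is the outer lock; the TDP MMU usually holds it for read. */
        read_lock(&mmu_lock);

        /* The spinlock may only nest inside mmu_lock, never the other way around. */
        spin_lock(&tdp_mmu_pages_lock);
        /* ... mutate the TDP MMU page list here ... */
        spin_unlock(&tdp_mmu_pages_lock);

        read_unlock(&mmu_lock);
    }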

Makefile (2 lines changed)
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 5
 PATCHLEVEL = 13
-SUBLEVEL = 11
+SUBLEVEL = 12
 EXTRAVERSION =
 NAME = Opossums on Parade
@@ -57,23 +57,26 @@ void fpu_save_restore(struct task_struct *prev, struct task_struct *next)

 void fpu_init_task(struct pt_regs *regs)
 {
+    const unsigned int fwe = 0x80000000;
+
     /* default rounding mode */
     write_aux_reg(ARC_REG_FPU_CTRL, 0x100);

-    /* set "Write enable" to allow explicit write to exception flags */
-    write_aux_reg(ARC_REG_FPU_STATUS, 0x80000000);
+    /* Initialize to zero: setting requires FWE be set */
+    write_aux_reg(ARC_REG_FPU_STATUS, fwe);
 }

 void fpu_save_restore(struct task_struct *prev, struct task_struct *next)
 {
     struct arc_fpu *save = &prev->thread.fpu;
     struct arc_fpu *restore = &next->thread.fpu;
+    const unsigned int fwe = 0x80000000;

     save->ctrl = read_aux_reg(ARC_REG_FPU_CTRL);
     save->status = read_aux_reg(ARC_REG_FPU_STATUS);

     write_aux_reg(ARC_REG_FPU_CTRL, restore->ctrl);
-    write_aux_reg(ARC_REG_FPU_STATUS, restore->status);
+    write_aux_reg(ARC_REG_FPU_STATUS, (fwe | restore->status));
 }

 #endif
@@ -190,7 +190,7 @@ static bool range_is_memory(u64 start, u64 end)
 {
     struct kvm_mem_range r1, r2;

-    if (!find_mem_range(start, &r1) || !find_mem_range(end, &r2))
+    if (!find_mem_range(start, &r1) || !find_mem_range(end - 1, &r2))
         return false;
     if (r1.start != r2.start)
         return false;
@@ -531,6 +531,9 @@ DECLARE_INTERRUPT_HANDLER_NMI(hmi_exception_realmode);

 DECLARE_INTERRUPT_HANDLER_ASYNC(TAUException);

+/* irq.c */
+DECLARE_INTERRUPT_HANDLER_ASYNC(do_IRQ);
+
 void __noreturn unrecoverable_exception(struct pt_regs *regs);

 void replay_system_reset(void);
@@ -53,7 +53,7 @@ extern void *mcheckirq_ctx[NR_CPUS];
 extern void *hardirq_ctx[NR_CPUS];
 extern void *softirq_ctx[NR_CPUS];

-extern void do_IRQ(struct pt_regs *regs);
+void __do_IRQ(struct pt_regs *regs);
 extern void __init init_IRQ(void);
 extern void __do_irq(struct pt_regs *regs);
@@ -68,6 +68,22 @@ struct pt_regs
         };
         unsigned long __pad[4]; /* Maintain 16 byte interrupt stack alignment */
     };
+#if defined(CONFIG_PPC32) && defined(CONFIG_BOOKE)
+    struct { /* Must be a multiple of 16 bytes */
+        unsigned long mas0;
+        unsigned long mas1;
+        unsigned long mas2;
+        unsigned long mas3;
+        unsigned long mas6;
+        unsigned long mas7;
+        unsigned long srr0;
+        unsigned long srr1;
+        unsigned long csrr0;
+        unsigned long csrr1;
+        unsigned long dsrr0;
+        unsigned long dsrr1;
+    };
+#endif
 };
 #endif
@@ -349,24 +349,21 @@ int main(void)
 #endif


-#if defined(CONFIG_PPC32)
-#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
-    DEFINE(EXC_LVL_SIZE, STACK_EXC_LVL_FRAME_SIZE);
-    DEFINE(MAS0, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas0));
+#if defined(CONFIG_PPC32) && defined(CONFIG_BOOKE)
+    STACK_PT_REGS_OFFSET(MAS0, mas0);
     /* we overload MMUCR for 44x on MAS0 since they are mutually exclusive */
-    DEFINE(MMUCR, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas0));
-    DEFINE(MAS1, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas1));
-    DEFINE(MAS2, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas2));
-    DEFINE(MAS3, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas3));
-    DEFINE(MAS6, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas6));
-    DEFINE(MAS7, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas7));
-    DEFINE(_SRR0, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, srr0));
-    DEFINE(_SRR1, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, srr1));
-    DEFINE(_CSRR0, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, csrr0));
-    DEFINE(_CSRR1, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, csrr1));
-    DEFINE(_DSRR0, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, dsrr0));
-    DEFINE(_DSRR1, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, dsrr1));
-#endif
+    STACK_PT_REGS_OFFSET(MMUCR, mas0);
+    STACK_PT_REGS_OFFSET(MAS1, mas1);
+    STACK_PT_REGS_OFFSET(MAS2, mas2);
+    STACK_PT_REGS_OFFSET(MAS3, mas3);
+    STACK_PT_REGS_OFFSET(MAS6, mas6);
+    STACK_PT_REGS_OFFSET(MAS7, mas7);
+    STACK_PT_REGS_OFFSET(_SRR0, srr0);
+    STACK_PT_REGS_OFFSET(_SRR1, srr1);
+    STACK_PT_REGS_OFFSET(_CSRR0, csrr0);
+    STACK_PT_REGS_OFFSET(_CSRR1, csrr1);
+    STACK_PT_REGS_OFFSET(_DSRR0, dsrr0);
+    STACK_PT_REGS_OFFSET(_DSRR1, dsrr1);
 #endif

 #ifndef CONFIG_PPC64
@@ -300,7 +300,7 @@ ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_HPTE_TABLE)
     EXCEPTION_PROLOG_1
     EXCEPTION_PROLOG_2 INTERRUPT_DATA_STORAGE DataAccess handle_dar_dsisr=1
     prepare_transfer_to_handler
-    lwz r5, _DSISR(r11)
+    lwz r5, _DSISR(r1)
     andis. r0, r5, DSISR_DABRMATCH@h
     bne- 1f
     bl do_page_fault
@@ -185,20 +185,18 @@ ALT_FTR_SECTION_END_IFSET(CPU_FTR_EMB_HV)
 /* only on e500mc */
 #define DBG_STACK_BASE dbgirq_ctx

-#define EXC_LVL_FRAME_OVERHEAD (THREAD_SIZE - INT_FRAME_SIZE - EXC_LVL_SIZE)
-
 #ifdef CONFIG_SMP
 #define BOOKE_LOAD_EXC_LEVEL_STACK(level)       \
     mfspr   r8,SPRN_PIR;                        \
     slwi    r8,r8,2;                            \
     addis   r8,r8,level##_STACK_BASE@ha;        \
     lwz     r8,level##_STACK_BASE@l(r8);        \
-    addi    r8,r8,EXC_LVL_FRAME_OVERHEAD;
+    addi    r8,r8,THREAD_SIZE - INT_FRAME_SIZE;
 #else
 #define BOOKE_LOAD_EXC_LEVEL_STACK(level)       \
     lis     r8,level##_STACK_BASE@ha;           \
     lwz     r8,level##_STACK_BASE@l(r8);        \
-    addi    r8,r8,EXC_LVL_FRAME_OVERHEAD;
+    addi    r8,r8,THREAD_SIZE - INT_FRAME_SIZE;
 #endif

 /*

@@ -225,7 +223,7 @@ ALT_FTR_SECTION_END_IFSET(CPU_FTR_EMB_HV)
     mtmsr   r11;                                                    \
     mfspr   r11,SPRN_SPRG_THREAD;   /* if from user, start at top of   */\
     lwz     r11, TASK_STACK - THREAD(r11); /* this thread's kernel stack */\
-    addi    r11,r11,EXC_LVL_FRAME_OVERHEAD; /* allocate stack frame    */\
+    addi    r11,r11,THREAD_SIZE - INT_FRAME_SIZE;   /* allocate stack frame    */\
     beq     1f;                                                     \
     /* COMING FROM USER MODE */                                     \
     stw     r9,_CCR(r11);           /* save    CR                   */\

@@ -533,24 +531,5 @@ label:
     bl kernel_fp_unavailable_exception;         \
     b interrupt_return

-#else /* __ASSEMBLY__ */
-struct exception_regs {
-    unsigned long mas0;
-    unsigned long mas1;
-    unsigned long mas2;
-    unsigned long mas3;
-    unsigned long mas6;
-    unsigned long mas7;
-    unsigned long srr0;
-    unsigned long srr1;
-    unsigned long csrr0;
-    unsigned long csrr1;
-    unsigned long dsrr0;
-    unsigned long dsrr1;
-};
-
-/* ensure this structure is always sized to a multiple of the stack alignment */
-#define STACK_EXC_LVL_FRAME_SIZE ALIGN(sizeof (struct exception_regs), 16)
-
 #endif /* __ASSEMBLY__ */
 #endif /* __HEAD_BOOKE_H__ */
@@ -656,7 +656,7 @@ void __do_irq(struct pt_regs *regs)
     trace_irq_exit(regs);
 }

-DEFINE_INTERRUPT_HANDLER_ASYNC(do_IRQ)
+void __do_IRQ(struct pt_regs *regs)
 {
     struct pt_regs *old_regs = set_irq_regs(regs);
     void *cursp, *irqsp, *sirqsp;

@@ -680,6 +680,11 @@ DEFINE_INTERRUPT_HANDLER_ASYNC(do_IRQ)
     set_irq_regs(old_regs);
 }

+DEFINE_INTERRUPT_HANDLER_ASYNC(do_IRQ)
+{
+    __do_IRQ(regs);
+}
+
 static void *__init alloc_vm_stack(void)
 {
     return __vmalloc_node(THREAD_SIZE, THREAD_ALIGN, THREADINFO_GFP,
@@ -276,7 +276,8 @@ int kprobe_handler(struct pt_regs *regs)
     if (user_mode(regs))
         return 0;

-    if (!(regs->msr & MSR_IR) || !(regs->msr & MSR_DR))
+    if (!IS_ENABLED(CONFIG_BOOKE) &&
+        (!(regs->msr & MSR_IR) || !(regs->msr & MSR_DR)))
         return 0;

     /*
@@ -1167,7 +1167,7 @@ static int __init topology_init(void)
      * CPU. For instance, the boot cpu might never be valid
      * for hotplugging.
      */
-    if (smp_ops->cpu_offline_self)
+    if (smp_ops && smp_ops->cpu_offline_self)
         c->hotpluggable = 1;
 #endif
@@ -607,7 +607,7 @@ DEFINE_INTERRUPT_HANDLER_ASYNC(timer_interrupt)

 #if defined(CONFIG_PPC32) && defined(CONFIG_PPC_PMAC)
     if (atomic_read(&ppc_n_lost_interrupts) != 0)
-        do_IRQ(regs);
+        __do_IRQ(regs);
 #endif

     old_regs = set_irq_regs(regs);
@@ -1107,7 +1107,7 @@ DEFINE_INTERRUPT_HANDLER(RunModeException)
     _exception(SIGTRAP, regs, TRAP_UNK, 0);
 }

-DEFINE_INTERRUPT_HANDLER(single_step_exception)
+static void __single_step_exception(struct pt_regs *regs)
 {
     clear_single_step(regs);
     clear_br_trace(regs);

@@ -1124,6 +1124,11 @@ DEFINE_INTERRUPT_HANDLER(single_step_exception)
     _exception(SIGTRAP, regs, TRAP_TRACE, regs->nip);
 }

+DEFINE_INTERRUPT_HANDLER(single_step_exception)
+{
+    __single_step_exception(regs);
+}
+
 /*
  * After we have successfully emulated an instruction, we have to
  * check if the instruction was being single-stepped, and if so,

@@ -1133,7 +1138,7 @@ DEFINE_INTERRUPT_HANDLER(single_step_exception)
 static void emulate_single_step(struct pt_regs *regs)
 {
     if (single_stepping(regs))
-        single_step_exception(regs);
+        __single_step_exception(regs);
 }

 static inline int __parse_fpscr(unsigned long fpscr)
@@ -539,9 +539,10 @@ static void init_cpu_char_feature_flags(struct h_cpu_char_result *result)
      * H_CPU_BEHAV_FAVOUR_SECURITY_H could be set only if
      * H_CPU_BEHAV_FAVOUR_SECURITY is.
      */
-    if (!(result->behaviour & H_CPU_BEHAV_FAVOUR_SECURITY))
+    if (!(result->behaviour & H_CPU_BEHAV_FAVOUR_SECURITY)) {
         security_ftr_clear(SEC_FTR_FAVOUR_SECURITY);
-    else if (result->behaviour & H_CPU_BEHAV_FAVOUR_SECURITY_H)
+        pseries_security_flavor = 0;
+    } else if (result->behaviour & H_CPU_BEHAV_FAVOUR_SECURITY_H)
         pseries_security_flavor = 1;
     else
         pseries_security_flavor = 2;
@@ -67,6 +67,7 @@ static struct irq_domain *xive_irq_domain;
 static struct xive_ipi_desc {
     unsigned int irq;
     char name[16];
+    atomic_t started;
 } *xive_ipis;

 /*

@@ -1120,7 +1121,7 @@ static const struct irq_domain_ops xive_ipi_irq_domain_ops = {
     .alloc = xive_ipi_irq_domain_alloc,
 };

-static int __init xive_request_ipi(void)
+static int __init xive_init_ipis(void)
 {
     struct fwnode_handle *fwnode;
     struct irq_domain *ipi_domain;

@@ -1144,10 +1145,6 @@ static int __init xive_request_ipi(void)
         struct xive_ipi_desc *xid = &xive_ipis[node];
         struct xive_ipi_alloc_info info = { node };

-        /* Skip nodes without CPUs */
-        if (cpumask_empty(cpumask_of_node(node)))
-            continue;
-
         /*
          * Map one IPI interrupt per node for all cpus of that node.
          * Since the HW interrupt number doesn't have any meaning,

@@ -1159,11 +1156,6 @@ static int __init xive_request_ipi(void)
         xid->irq = ret;

         snprintf(xid->name, sizeof(xid->name), "IPI-%d", node);
-
-        ret = request_irq(xid->irq, xive_muxed_ipi_action,
-                  IRQF_PERCPU | IRQF_NO_THREAD, xid->name, NULL);
-
-        WARN(ret < 0, "Failed to request IPI %d: %d\n", xid->irq, ret);
     }

     return ret;

@@ -1178,6 +1170,22 @@ out:
     return ret;
 }

+static int __init xive_request_ipi(unsigned int cpu)
+{
+    struct xive_ipi_desc *xid = &xive_ipis[early_cpu_to_node(cpu)];
+    int ret;
+
+    if (atomic_inc_return(&xid->started) > 1)
+        return 0;
+
+    ret = request_irq(xid->irq, xive_muxed_ipi_action,
+              IRQF_PERCPU | IRQF_NO_THREAD,
+              xid->name, NULL);
+
+    WARN(ret < 0, "Failed to request IPI %d: %d\n", xid->irq, ret);
+    return ret;
+}
+
 static int xive_setup_cpu_ipi(unsigned int cpu)
 {
     unsigned int xive_ipi_irq = xive_ipi_cpu_to_irq(cpu);

@@ -1192,6 +1200,9 @@ static int xive_setup_cpu_ipi(unsigned int cpu)
     if (xc->hw_ipi != XIVE_BAD_IRQ)
         return 0;

+    /* Register the IPI */
+    xive_request_ipi(cpu);
+
     /* Grab an IPI from the backend, this will populate xc->hw_ipi */
     if (xive_ops->get_ipi(cpu, xc))
         return -EIO;

@@ -1231,6 +1242,8 @@ static void xive_cleanup_cpu_ipi(unsigned int cpu, struct xive_cpu *xc)
     if (xc->hw_ipi == XIVE_BAD_IRQ)
         return;

+    /* TODO: clear IPI mapping */
+
     /* Mask the IPI */
     xive_do_source_set_mask(&xc->ipi_data, true);

@@ -1253,7 +1266,7 @@ void __init xive_smp_probe(void)
     smp_ops->cause_ipi = xive_cause_ipi;

     /* Register the IPI */
-    xive_request_ipi();
+    xive_init_ipis();

     /* Allocate and setup IPI for the boot CPU */
     xive_setup_cpu_ipi(smp_processor_id());
@@ -11,7 +11,7 @@ endif
 CFLAGS_syscall_table.o += $(call cc-option,-Wno-override-init,)

 ifdef CONFIG_KEXEC
-AFLAGS_kexec_relocate.o := -mcmodel=medany -mno-relax
+AFLAGS_kexec_relocate.o := -mcmodel=medany $(call cc-option,-mno-relax)
 endif

 extra-y += head.o
@@ -2904,24 +2904,28 @@ static int handle_pmi_common(struct pt_regs *regs, u64 status)
  */
 static int intel_pmu_handle_irq(struct pt_regs *regs)
 {
-    struct cpu_hw_events *cpuc;
+    struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+    bool late_ack = hybrid_bit(cpuc->pmu, late_ack);
+    bool mid_ack = hybrid_bit(cpuc->pmu, mid_ack);
     int loops;
     u64 status;
     int handled;
     int pmu_enabled;

-    cpuc = this_cpu_ptr(&cpu_hw_events);
-
     /*
      * Save the PMU state.
      * It needs to be restored when leaving the handler.
      */
     pmu_enabled = cpuc->enabled;
     /*
-     * No known reason to not always do late ACK,
-     * but just in case do it opt-in.
+     * In general, the early ACK is only applied for old platforms.
+     * For the big core starts from Haswell, the late ACK should be
+     * applied.
+     * For the small core after Tremont, we have to do the ACK right
+     * before re-enabling counters, which is in the middle of the
+     * NMI handler.
      */
-    if (!x86_pmu.late_ack)
+    if (!late_ack && !mid_ack)
         apic_write(APIC_LVTPC, APIC_DM_NMI);
     intel_bts_disable_local();
     cpuc->enabled = 0;

@@ -2958,6 +2962,8 @@ again:
         goto again;

 done:
+    if (mid_ack)
+        apic_write(APIC_LVTPC, APIC_DM_NMI);
     /* Only restore PMU state when it's active. See x86_pmu_disable(). */
     cpuc->enabled = pmu_enabled;
     if (pmu_enabled)

@@ -2969,7 +2975,7 @@ done:
      * have been reset. This avoids spurious NMIs on
      * Haswell CPUs.
      */
-    if (x86_pmu.late_ack)
+    if (late_ack)
         apic_write(APIC_LVTPC, APIC_DM_NMI);
     return handled;
 }

@@ -6123,7 +6129,6 @@ __init int intel_pmu_init(void)
         static_branch_enable(&perf_is_hybrid);
         x86_pmu.num_hybrid_pmus = X86_HYBRID_NUM_PMUS;

-        x86_pmu.late_ack = true;
         x86_pmu.pebs_aliases = NULL;
         x86_pmu.pebs_prec_dist = true;
         x86_pmu.pebs_block = true;

@@ -6161,6 +6166,7 @@ __init int intel_pmu_init(void)
         pmu = &x86_pmu.hybrid_pmu[X86_HYBRID_PMU_CORE_IDX];
         pmu->name = "cpu_core";
         pmu->cpu_type = hybrid_big;
+        pmu->late_ack = true;
         if (cpu_feature_enabled(X86_FEATURE_HYBRID_CPU)) {
             pmu->num_counters = x86_pmu.num_counters + 2;
             pmu->num_counters_fixed = x86_pmu.num_counters_fixed + 1;

@@ -6186,6 +6192,7 @@ __init int intel_pmu_init(void)
         pmu = &x86_pmu.hybrid_pmu[X86_HYBRID_PMU_ATOM_IDX];
         pmu->name = "cpu_atom";
         pmu->cpu_type = hybrid_small;
+        pmu->mid_ack = true;
         pmu->num_counters = x86_pmu.num_counters;
         pmu->num_counters_fixed = x86_pmu.num_counters_fixed;
         pmu->max_pebs_events = x86_pmu.max_pebs_events;
@@ -656,6 +656,10 @@ struct x86_hybrid_pmu {
     struct event_constraint *event_constraints;
     struct event_constraint *pebs_constraints;
     struct extra_reg *extra_regs;
+
+    unsigned int late_ack :1,
+                 mid_ack :1,
+                 enabled_ack :1;
 };

 static __always_inline struct x86_hybrid_pmu *hybrid_pmu(struct pmu *pmu)

@@ -686,6 +690,16 @@ extern struct static_key_false perf_is_hybrid;
     __Fp;                                       \
 }))

+#define hybrid_bit(_pmu, _field)                \
+({                                              \
+    bool __Fp = x86_pmu._field;                 \
+                                                \
+    if (is_hybrid() && (_pmu))                  \
+        __Fp = hybrid_pmu(_pmu)->_field;        \
+                                                \
+    __Fp;                                       \
+})
+
 enum hybrid_pmu_type {
     hybrid_big = 0x40,
     hybrid_small = 0x20,

@@ -755,6 +769,7 @@ struct x86_pmu {

     /* PMI handler bits */
     unsigned int late_ack :1,
+                 mid_ack :1,
                  enabled_ack :1;
     /*
      * sysfs attrs
@@ -987,6 +987,13 @@ struct kvm_arch {
     struct list_head lpage_disallowed_mmu_pages;
     struct kvm_page_track_notifier_node mmu_sp_tracker;
     struct kvm_page_track_notifier_head track_notifier_head;
+    /*
+     * Protects marking pages unsync during page faults, as TDP MMU page
+     * faults only take mmu_lock for read.  For simplicity, the unsync
+     * pages lock is always taken when marking pages unsync regardless of
+     * whether mmu_lock is held for read or write.
+     */
+    spinlock_t mmu_unsync_pages_lock;

     struct list_head assigned_dev_head;
     struct iommu_domain *iommu_domain;
@@ -178,6 +178,8 @@ struct __attribute__ ((__packed__)) vmcb_control_area {
 #define V_IGN_TPR_SHIFT 20
 #define V_IGN_TPR_MASK (1 << V_IGN_TPR_SHIFT)

+#define V_IRQ_INJECTION_BITS_MASK (V_IRQ_MASK | V_INTR_PRIO_MASK | V_IGN_TPR_MASK)
+
 #define V_INTR_MASKING_SHIFT 24
 #define V_INTR_MASKING_MASK (1 << V_INTR_MASKING_SHIFT)
@@ -1986,7 +1986,8 @@ static struct irq_chip ioapic_chip __read_mostly = {
     .irq_set_affinity = ioapic_set_affinity,
     .irq_retrigger = irq_chip_retrigger_hierarchy,
     .irq_get_irqchip_state = ioapic_irq_get_chip_state,
-    .flags = IRQCHIP_SKIP_SET_WAKE,
+    .flags = IRQCHIP_SKIP_SET_WAKE |
+             IRQCHIP_AFFINITY_PRE_STARTUP,
 };

 static struct irq_chip ioapic_ir_chip __read_mostly = {

@@ -1999,7 +2000,8 @@ static struct irq_chip ioapic_ir_chip __read_mostly = {
     .irq_set_affinity = ioapic_set_affinity,
     .irq_retrigger = irq_chip_retrigger_hierarchy,
     .irq_get_irqchip_state = ioapic_irq_get_chip_state,
-    .flags = IRQCHIP_SKIP_SET_WAKE,
+    .flags = IRQCHIP_SKIP_SET_WAKE |
+             IRQCHIP_AFFINITY_PRE_STARTUP,
 };

 static inline void init_IO_APIC_traps(void)
@@ -58,11 +58,13 @@ msi_set_affinity(struct irq_data *irqd, const struct cpumask *mask, bool force)
      *   The quirk bit is not set in this case.
      * - The new vector is the same as the old vector
      * - The old vector is MANAGED_IRQ_SHUTDOWN_VECTOR (interrupt starts up)
+     * - The interrupt is not yet started up
      * - The new destination CPU is the same as the old destination CPU
      */
     if (!irqd_msi_nomask_quirk(irqd) ||
         cfg->vector == old_cfg.vector ||
         old_cfg.vector == MANAGED_IRQ_SHUTDOWN_VECTOR ||
+        !irqd_is_started(irqd) ||
         cfg->dest_apicid == old_cfg.dest_apicid) {
         irq_msi_update_msg(irqd, cfg);
         return ret;

@@ -150,7 +152,8 @@ static struct irq_chip pci_msi_controller = {
     .irq_ack = irq_chip_ack_parent,
     .irq_retrigger = irq_chip_retrigger_hierarchy,
     .irq_set_affinity = msi_set_affinity,
-    .flags = IRQCHIP_SKIP_SET_WAKE,
+    .flags = IRQCHIP_SKIP_SET_WAKE |
+             IRQCHIP_AFFINITY_PRE_STARTUP,
 };

 int pci_msi_prepare(struct irq_domain *domain, struct device *dev, int nvec,

@@ -219,7 +222,8 @@ static struct irq_chip pci_msi_ir_controller = {
     .irq_mask = pci_msi_mask_irq,
     .irq_ack = irq_chip_ack_parent,
     .irq_retrigger = irq_chip_retrigger_hierarchy,
-    .flags = IRQCHIP_SKIP_SET_WAKE,
+    .flags = IRQCHIP_SKIP_SET_WAKE |
+             IRQCHIP_AFFINITY_PRE_STARTUP,
 };

 static struct msi_domain_info pci_msi_ir_domain_info = {

@@ -273,7 +277,8 @@ static struct irq_chip dmar_msi_controller = {
     .irq_retrigger = irq_chip_retrigger_hierarchy,
     .irq_compose_msi_msg = dmar_msi_compose_msg,
     .irq_write_msi_msg = dmar_msi_write_msg,
-    .flags = IRQCHIP_SKIP_SET_WAKE,
+    .flags = IRQCHIP_SKIP_SET_WAKE |
+             IRQCHIP_AFFINITY_PRE_STARTUP,
 };

 static int dmar_msi_init(struct irq_domain *domain,
@@ -285,15 +285,14 @@ static u64 mbm_overflow_count(u64 prev_msr, u64 cur_msr, unsigned int width)
     return chunks >>= shift;
 }

-static int __mon_event_count(u32 rmid, struct rmid_read *rr)
+static u64 __mon_event_count(u32 rmid, struct rmid_read *rr)
 {
     struct mbm_state *m;
     u64 chunks, tval;

     tval = __rmid_read(rmid, rr->evtid);
     if (tval & (RMID_VAL_ERROR | RMID_VAL_UNAVAIL)) {
-        rr->val = tval;
-        return -EINVAL;
+        return tval;
     }
     switch (rr->evtid) {
     case QOS_L3_OCCUP_EVENT_ID:

@@ -305,12 +304,6 @@ static int __mon_event_count(u32 rmid, struct rmid_read *rr)
     case QOS_L3_MBM_LOCAL_EVENT_ID:
         m = &rr->d->mbm_local[rmid];
         break;
-    default:
-        /*
-         * Code would never reach here because
-         * an invalid event id would fail the __rmid_read.
-         */
-        return -EINVAL;
     }

     if (rr->first) {

@@ -361,23 +354,29 @@ void mon_event_count(void *info)
     struct rdtgroup *rdtgrp, *entry;
     struct rmid_read *rr = info;
     struct list_head *head;
+    u64 ret_val;

     rdtgrp = rr->rgrp;

-    if (__mon_event_count(rdtgrp->mon.rmid, rr))
-        return;
+    ret_val = __mon_event_count(rdtgrp->mon.rmid, rr);

     /*
-     * For Ctrl groups read data from child monitor groups.
+     * For Ctrl groups read data from child monitor groups and
+     * add them together. Count events which are read successfully.
+     * Discard the rmid_read's reporting errors.
      */
     head = &rdtgrp->mon.crdtgrp_list;

     if (rdtgrp->type == RDTCTRL_GROUP) {
         list_for_each_entry(entry, head, mon.crdtgrp_list) {
-            if (__mon_event_count(entry->mon.rmid, rr))
-                return;
+            if (__mon_event_count(entry->mon.rmid, rr) == 0)
+                ret_val = 0;
         }
     }
+
+    /* Report error if none of rmid_reads are successful */
+    if (ret_val)
+        rr->val = ret_val;
 }

 /*
@@ -508,7 +508,7 @@ static struct irq_chip hpet_msi_controller __ro_after_init = {
     .irq_set_affinity = msi_domain_set_affinity,
     .irq_retrigger = irq_chip_retrigger_hierarchy,
     .irq_write_msi_msg = hpet_msi_write_msg,
-    .flags = IRQCHIP_SKIP_SET_WAKE,
+    .flags = IRQCHIP_SKIP_SET_WAKE | IRQCHIP_AFFINITY_PRE_STARTUP,
 };

 static int hpet_msi_init(struct irq_domain *domain,
@@ -2454,6 +2454,7 @@ bool mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn,
                 bool can_unsync)
 {
     struct kvm_mmu_page *sp;
+    bool locked = false;

     if (kvm_page_track_is_active(vcpu, gfn, KVM_PAGE_TRACK_WRITE))
         return true;

@@ -2465,9 +2466,34 @@ bool mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn,
         if (sp->unsync)
             continue;

+        /*
+         * TDP MMU page faults require an additional spinlock as they
+         * run with mmu_lock held for read, not write, and the unsync
+         * logic is not thread safe.  Take the spinlock regardless of
+         * the MMU type to avoid extra conditionals/parameters, there's
+         * no meaningful penalty if mmu_lock is held for write.
+         */
+        if (!locked) {
+            locked = true;
+            spin_lock(&vcpu->kvm->arch.mmu_unsync_pages_lock);
+
+            /*
+             * Recheck after taking the spinlock, a different vCPU
+             * may have since marked the page unsync.  A false
+             * positive on the unprotected check above is not
+             * possible as clearing sp->unsync _must_ hold mmu_lock
+             * for write, i.e. unsync cannot transition from 0->1
+             * while this CPU holds mmu_lock for read (or write).
+             */
+            if (READ_ONCE(sp->unsync))
+                continue;
+        }
+
         WARN_ON(sp->role.level != PG_LEVEL_4K);
         kvm_unsync_page(vcpu, sp);
     }
+    if (locked)
+        spin_unlock(&vcpu->kvm->arch.mmu_unsync_pages_lock);

     /*
      * We need to ensure that the marking of unsync pages is visible

@@ -5514,6 +5540,8 @@ void kvm_mmu_init_vm(struct kvm *kvm)
 {
     struct kvm_page_track_notifier_node *node = &kvm->arch.mmu_sp_tracker;

+    spin_lock_init(&kvm->arch.mmu_unsync_pages_lock);
+
     kvm_mmu_init_tdp_mmu(kvm);

     node->track_write = kvm_mmu_pte_write;
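The hunk above is a double-checked locking pattern: the unsync flag is tested once without the spinlock, and re-tested under it before committing the change. A generic sketch of that shape, with hypothetical names rather than KVM's:

    #include <linux/compiler.h>
    #include <linux/spinlock.h>

    struct tracked_page {
        bool unsync;
        /* ... */
    };

    /* Plays the role of mmu_unsync_pages_lock in the hunk above. */
    static DEFINE_SPINLOCK(unsync_lock);

    static void mark_unsync(struct tracked_page *p)
    {
        if (READ_ONCE(p->unsync))       /* cheap unlocked fast path */
            return;

        spin_lock(&unsync_lock);
        if (!READ_ONCE(p->unsync))      /* recheck: another CPU may have won */
            WRITE_ONCE(p->unsync, true);
        spin_unlock(&unsync_lock);
    }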
@@ -41,6 +41,7 @@ void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm)
     if (!kvm->arch.tdp_mmu_enabled)
         return;

+    WARN_ON(!list_empty(&kvm->arch.tdp_mmu_pages));
     WARN_ON(!list_empty(&kvm->arch.tdp_mmu_roots));

     /*

@@ -79,8 +80,6 @@ static void tdp_mmu_free_sp_rcu_callback(struct rcu_head *head)
 void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root,
               bool shared)
 {
-    gfn_t max_gfn = 1ULL << (shadow_phys_bits - PAGE_SHIFT);
-
     kvm_lockdep_assert_mmu_lock_held(kvm, shared);

     if (!refcount_dec_and_test(&root->tdp_mmu_root_count))

@@ -92,7 +91,7 @@ void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root,
     list_del_rcu(&root->link);
     spin_unlock(&kvm->arch.tdp_mmu_pages_lock);

-    zap_gfn_range(kvm, root, 0, max_gfn, false, false, shared);
+    zap_gfn_range(kvm, root, 0, -1ull, false, false, shared);

     call_rcu(&root->rcu_head, tdp_mmu_free_sp_rcu_callback);
 }

@@ -722,8 +721,17 @@ static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
               gfn_t start, gfn_t end, bool can_yield, bool flush,
               bool shared)
 {
+    gfn_t max_gfn_host = 1ULL << (shadow_phys_bits - PAGE_SHIFT);
+    bool zap_all = (start == 0 && end >= max_gfn_host);
     struct tdp_iter iter;

+    /*
+     * Bound the walk at host.MAXPHYADDR, guest accesses beyond that will
+     * hit a #PF(RSVD) and never get to an EPT Violation/Misconfig / #NPF,
+     * and so KVM will never install a SPTE for such addresses.
+     */
+    end = min(end, max_gfn_host);
+
     kvm_lockdep_assert_mmu_lock_held(kvm, shared);

     rcu_read_lock();

@@ -742,9 +750,10 @@ retry:
         /*
          * If this is a non-last-level SPTE that covers a larger range
          * than should be zapped, continue, and zap the mappings at a
-         * lower level.
+         * lower level, except when zapping all SPTEs.
          */
-        if ((iter.gfn < start ||
+        if (!zap_all &&
+            (iter.gfn < start ||
              iter.gfn + KVM_PAGES_PER_HPAGE(iter.level) > end) &&
             !is_last_spte(iter.old_spte, iter.level))
             continue;

@@ -792,12 +801,11 @@ bool __kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, int as_id, gfn_t start,

 void kvm_tdp_mmu_zap_all(struct kvm *kvm)
 {
-    gfn_t max_gfn = 1ULL << (shadow_phys_bits - PAGE_SHIFT);
     bool flush = false;
     int i;

     for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++)
-        flush = kvm_tdp_mmu_zap_gfn_range(kvm, i, 0, max_gfn,
+        flush = kvm_tdp_mmu_zap_gfn_range(kvm, i, 0, -1ull,
                           flush, false);

     if (flush)

@@ -836,7 +844,6 @@ static struct kvm_mmu_page *next_invalidated_root(struct kvm *kvm,
  */
 void kvm_tdp_mmu_zap_invalidated_roots(struct kvm *kvm)
 {
-    gfn_t max_gfn = 1ULL << (shadow_phys_bits - PAGE_SHIFT);
     struct kvm_mmu_page *next_root;
     struct kvm_mmu_page *root;
     bool flush = false;

@@ -852,8 +859,7 @@ void kvm_tdp_mmu_zap_invalidated_roots(struct kvm *kvm)

         rcu_read_unlock();

-        flush = zap_gfn_range(kvm, root, 0, max_gfn, true, flush,
-                      true);
+        flush = zap_gfn_range(kvm, root, 0, -1ull, true, flush, true);

         /*
          * Put the reference acquired in
@@ -149,6 +149,9 @@ void recalc_intercepts(struct vcpu_svm *svm)

     for (i = 0; i < MAX_INTERCEPT; i++)
         c->intercepts[i] |= g->intercepts[i];
+
+    vmcb_set_intercept(c, INTERCEPT_VMLOAD);
+    vmcb_set_intercept(c, INTERCEPT_VMSAVE);
 }

 static void copy_vmcb_control_area(struct vmcb_control_area *dst,

@@ -480,7 +483,10 @@ static void nested_vmcb02_prepare_save(struct vcpu_svm *svm, struct vmcb *vmcb12

 static void nested_vmcb02_prepare_control(struct vcpu_svm *svm)
 {
-    const u32 mask = V_INTR_MASKING_MASK | V_GIF_ENABLE_MASK | V_GIF_MASK;
+    const u32 int_ctl_vmcb01_bits =
+        V_INTR_MASKING_MASK | V_GIF_MASK | V_GIF_ENABLE_MASK;
+
+    const u32 int_ctl_vmcb12_bits = V_TPR_MASK | V_IRQ_INJECTION_BITS_MASK;

     /*
      * Filled at exit: exit_code, exit_code_hi, exit_info_1, exit_info_2,

@@ -511,8 +517,8 @@ static void nested_vmcb02_prepare_control(struct vcpu_svm *svm)
         svm->vcpu.arch.l1_tsc_offset + svm->nested.ctl.tsc_offset;

     svm->vmcb->control.int_ctl =
-        (svm->nested.ctl.int_ctl & ~mask) |
-        (svm->vmcb01.ptr->control.int_ctl & mask);
+        (svm->nested.ctl.int_ctl & int_ctl_vmcb12_bits) |
+        (svm->vmcb01.ptr->control.int_ctl & int_ctl_vmcb01_bits);

     svm->vmcb->control.virt_ext = svm->nested.ctl.virt_ext;
     svm->vmcb->control.int_vector = svm->nested.ctl.int_vector;
@@ -1552,17 +1552,18 @@ static void svm_set_vintr(struct vcpu_svm *svm)

 static void svm_clear_vintr(struct vcpu_svm *svm)
 {
-    const u32 mask = V_TPR_MASK | V_GIF_ENABLE_MASK | V_GIF_MASK | V_INTR_MASKING_MASK;
     svm_clr_intercept(svm, INTERCEPT_VINTR);

     /* Drop int_ctl fields related to VINTR injection. */
-    svm->vmcb->control.int_ctl &= mask;
+    svm->vmcb->control.int_ctl &= ~V_IRQ_INJECTION_BITS_MASK;
     if (is_guest_mode(&svm->vcpu)) {
-        svm->vmcb01.ptr->control.int_ctl &= mask;
+        svm->vmcb01.ptr->control.int_ctl &= ~V_IRQ_INJECTION_BITS_MASK;

         WARN_ON((svm->vmcb->control.int_ctl & V_TPR_MASK) !=
             (svm->nested.ctl.int_ctl & V_TPR_MASK));
-        svm->vmcb->control.int_ctl |= svm->nested.ctl.int_ctl & ~mask;
+
+        svm->vmcb->control.int_ctl |= svm->nested.ctl.int_ctl &
+            V_IRQ_INJECTION_BITS_MASK;
     }

     vmcb_mark_dirty(svm->vmcb, VMCB_INTR);
@@ -5798,7 +5798,8 @@ static bool nested_vmx_l0_wants_exit(struct kvm_vcpu *vcpu,
         if (is_nmi(intr_info))
             return true;
         else if (is_page_fault(intr_info))
-            return vcpu->arch.apf.host_apf_flags || !enable_ept;
+            return vcpu->arch.apf.host_apf_flags ||
+                   vmx_need_pf_intercept(vcpu);
         else if (is_debug(intr_info) &&
              vcpu->guest_debug &
              (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))
@@ -538,7 +538,7 @@ static inline void decache_tsc_multiplier(struct vcpu_vmx *vmx)

 static inline bool vmx_has_waitpkg(struct vcpu_vmx *vmx)
 {
-    return vmx->secondary_exec_control &
+    return secondary_exec_controls_get(vmx) &
         SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE;
 }

@@ -10,6 +10,7 @@ BEGIN {

 /^GNU objdump/ {
     verstr = ""
+    gsub(/\(.*\)/, "");
     for (i = 3; i <= NF; i++)
         if (match($(i), "^[0-9]")) {
             verstr = $(i);
@@ -774,6 +774,7 @@ static void blkcg_rstat_flush(struct cgroup_subsys_state *css, int cpu)
         struct blkcg_gq *parent = blkg->parent;
         struct blkg_iostat_set *bisc = per_cpu_ptr(blkg->iostat_cpu, cpu);
         struct blkg_iostat cur, delta;
+        unsigned long flags;
         unsigned int seq;

         /* fetch the current per-cpu values */

@@ -783,21 +784,21 @@ static void blkcg_rstat_flush(struct cgroup_subsys_state *css, int cpu)
         } while (u64_stats_fetch_retry(&bisc->sync, seq));

         /* propagate percpu delta to global */
-        u64_stats_update_begin(&blkg->iostat.sync);
+        flags = u64_stats_update_begin_irqsave(&blkg->iostat.sync);
         blkg_iostat_set(&delta, &cur);
         blkg_iostat_sub(&delta, &bisc->last);
         blkg_iostat_add(&blkg->iostat.cur, &delta);
         blkg_iostat_add(&bisc->last, &delta);
-        u64_stats_update_end(&blkg->iostat.sync);
+        u64_stats_update_end_irqrestore(&blkg->iostat.sync, flags);

         /* propagate global delta to parent (unless that's root) */
         if (parent && parent->parent) {
-            u64_stats_update_begin(&parent->iostat.sync);
+            flags = u64_stats_update_begin_irqsave(&parent->iostat.sync);
             blkg_iostat_set(&delta, &blkg->iostat.cur);
             blkg_iostat_sub(&delta, &blkg->iostat.last);
             blkg_iostat_add(&parent->iostat.cur, &delta);
             blkg_iostat_add(&blkg->iostat.last, &delta);
-            u64_stats_update_end(&parent->iostat.sync);
+            u64_stats_update_end_irqrestore(&parent->iostat.sync, flags);
         }
     }

@@ -832,6 +833,7 @@ static void blkcg_fill_root_iostats(void)
         memset(&tmp, 0, sizeof(tmp));
         for_each_possible_cpu(cpu) {
             struct disk_stats *cpu_dkstats;
+            unsigned long flags;

             cpu_dkstats = per_cpu_ptr(bdev->bd_stats, cpu);
             tmp.ios[BLKG_IOSTAT_READ] +=

@@ -848,9 +850,9 @@ static void blkcg_fill_root_iostats(void)
             tmp.bytes[BLKG_IOSTAT_DISCARD] +=
                 cpu_dkstats->sectors[STAT_DISCARD] << 9;

-            u64_stats_update_begin(&blkg->iostat.sync);
+            flags = u64_stats_update_begin_irqsave(&blkg->iostat.sync);
             blkg_iostat_set(&blkg->iostat.cur, &tmp);
-            u64_stats_update_end(&blkg->iostat.sync);
+            u64_stats_update_end_irqrestore(&blkg->iostat.sync, flags);
         }
     }
 }
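The conversion above follows the usual u64_stats writer pattern when an update can be reached from both process and interrupt context: the irqsave variants keep the seqcount critical section from nesting on the same CPU. A minimal sketch of the pattern, with illustrative field names rather than the blkcg ones:

    #include <linux/u64_stats_sync.h>

    struct io_counter {
        u64_stats_t             bytes;
        struct u64_stats_sync   sync;   /* u64_stats_init() at setup time */
    };

    /* Writer that may race with another writer in IRQ context on this CPU. */
    static void io_counter_add(struct io_counter *c, u64 n)
    {
        unsigned long flags;

        flags = u64_stats_update_begin_irqsave(&c->sync);
        u64_stats_add(&c->bytes, n);
        u64_stats_update_end_irqrestore(&c->sync, flags);
    }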
@@ -3021,6 +3021,9 @@ static int acpi_nfit_register_region(struct acpi_nfit_desc *acpi_desc,
         struct acpi_nfit_memory_map *memdev = nfit_memdev->memdev;
         struct nd_mapping_desc *mapping;

+        /* range index 0 == unmapped in SPA or invalid-SPA */
+        if (memdev->range_index == 0 || spa->range_index == 0)
+            continue;
         if (memdev->range_index != spa->range_index)
             continue;
         if (count >= ND_MAX_MAPPINGS) {
@@ -2809,6 +2809,7 @@ void device_initialize(struct device *dev)
     device_pm_init(dev);
     set_dev_node(dev, -1);
 #ifdef CONFIG_GENERIC_MSI_IRQ
+    raw_spin_lock_init(&dev->msi_lock);
     INIT_LIST_HEAD(&dev->msi_list);
 #endif
     INIT_LIST_HEAD(&dev->links.consumers);
@@ -805,6 +805,10 @@ static bool nbd_clear_req(struct request *req, void *data, bool reserved)
 {
     struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req);

+    /* don't abort one completed request */
+    if (blk_mq_request_completed(req))
+        return true;
+
     mutex_lock(&cmd->lock);
     cmd->status = BLK_STS_IOERR;
     mutex_unlock(&cmd->lock);

@@ -1973,15 +1977,19 @@ static void nbd_disconnect_and_put(struct nbd_device *nbd)
 {
     mutex_lock(&nbd->config_lock);
     nbd_disconnect(nbd);
-    nbd_clear_sock(nbd);
-    mutex_unlock(&nbd->config_lock);
+    sock_shutdown(nbd);
     /*
      * Make sure recv thread has finished, so it does not drop the last
      * config ref and try to destroy the workqueue from inside the work
-     * queue.
+     * queue. And this also ensure that we can safely call nbd_clear_que()
+     * to cancel the inflight I/Os.
      */
     if (nbd->recv_workq)
         flush_workqueue(nbd->recv_workq);
+    nbd_clear_que(nbd);
+    nbd->task_setup = NULL;
+    mutex_unlock(&nbd->config_lock);
+
     if (test_and_clear_bit(NBD_RT_HAS_CONFIG_REF,
                    &nbd->config->runtime_flags))
         nbd_config_put(nbd);
@@ -35,15 +35,48 @@ efi_status_t check_platform_features(void)
 }

 /*
- * Although relocatable kernels can fix up the misalignment with respect to
- * MIN_KIMG_ALIGN, the resulting virtual text addresses are subtly out of
- * sync with those recorded in the vmlinux when kaslr is disabled but the
- * image required relocation anyway. Therefore retain 2M alignment unless
- * KASLR is in use.
+ * Distro versions of GRUB may ignore the BSS allocation entirely (i.e., fail
+ * to provide space, and fail to zero it). Check for this condition by double
+ * checking that the first and the last byte of the image are covered by the
+ * same EFI memory map entry.
  */
-static u64 min_kimg_align(void)
+static bool check_image_region(u64 base, u64 size)
 {
-    return efi_nokaslr ? MIN_KIMG_ALIGN : EFI_KIMG_ALIGN;
+    unsigned long map_size, desc_size, buff_size;
+    efi_memory_desc_t *memory_map;
+    struct efi_boot_memmap map;
+    efi_status_t status;
+    bool ret = false;
+    int map_offset;
+
+    map.map = &memory_map;
+    map.map_size = &map_size;
+    map.desc_size = &desc_size;
+    map.desc_ver = NULL;
+    map.key_ptr = NULL;
+    map.buff_size = &buff_size;
+
+    status = efi_get_memory_map(&map);
+    if (status != EFI_SUCCESS)
+        return false;
+
+    for (map_offset = 0; map_offset < map_size; map_offset += desc_size) {
+        efi_memory_desc_t *md = (void *)memory_map + map_offset;
+        u64 end = md->phys_addr + md->num_pages * EFI_PAGE_SIZE;
+
+        /*
+         * Find the region that covers base, and return whether
+         * it covers base+size bytes.
+         */
+        if (base >= md->phys_addr && base < end) {
+            ret = (base + size) <= end;
+            break;
+        }
+    }
+
+    efi_bs_call(free_pool, memory_map);
+
+    return ret;
 }

 efi_status_t handle_kernel_image(unsigned long *image_addr,

@@ -56,6 +89,16 @@ efi_status_t handle_kernel_image(unsigned long *image_addr,
     unsigned long kernel_size, kernel_memsize = 0;
     u32 phys_seed = 0;

+    /*
+     * Although relocatable kernels can fix up the misalignment with
+     * respect to MIN_KIMG_ALIGN, the resulting virtual text addresses are
+     * subtly out of sync with those recorded in the vmlinux when kaslr is
+     * disabled but the image required relocation anyway. Therefore retain
+     * 2M alignment if KASLR was explicitly disabled, even if it was not
+     * going to be activated to begin with.
+     */
+    u64 min_kimg_align = efi_nokaslr ? MIN_KIMG_ALIGN : EFI_KIMG_ALIGN;
+
     if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
         if (!efi_nokaslr) {
             status = efi_get_random_bytes(sizeof(phys_seed),

@@ -76,6 +119,10 @@ efi_status_t handle_kernel_image(unsigned long *image_addr,
     if (image->image_base != _text)
         efi_err("FIRMWARE BUG: efi_loaded_image_t::image_base has bogus value\n");

+    if (!IS_ALIGNED((u64)_text, EFI_KIMG_ALIGN))
+        efi_err("FIRMWARE BUG: kernel image not aligned on %ldk boundary\n",
+            EFI_KIMG_ALIGN >> 10);
+
     kernel_size = _edata - _text;
     kernel_memsize = kernel_size + (_end - _edata);
     *reserve_size = kernel_memsize;

@@ -85,14 +132,16 @@ efi_status_t handle_kernel_image(unsigned long *image_addr,
          * If KASLR is enabled, and we have some randomness available,
          * locate the kernel at a randomized offset in physical memory.
          */
-        status = efi_random_alloc(*reserve_size, min_kimg_align(),
+        status = efi_random_alloc(*reserve_size, min_kimg_align,
                       reserve_addr, phys_seed);
     } else {
         status = EFI_OUT_OF_RESOURCES;
     }

     if (status != EFI_SUCCESS) {
-        if (IS_ALIGNED((u64)_text, min_kimg_align())) {
+        if (!check_image_region((u64)_text, kernel_memsize)) {
+            efi_err("FIRMWARE BUG: Image BSS overlaps adjacent EFI memory region\n");
+        } else if (IS_ALIGNED((u64)_text, min_kimg_align)) {
             /*
              * Just execute from wherever we were loaded by the
              * UEFI PE/COFF loader if the alignment is suitable.

@@ -103,7 +152,7 @@ efi_status_t handle_kernel_image(unsigned long *image_addr,
     }

     status = efi_allocate_pages_aligned(*reserve_size, reserve_addr,
-                        ULONG_MAX, min_kimg_align());
+                        ULONG_MAX, min_kimg_align);

     if (status != EFI_SUCCESS) {
         efi_err("Failed to relocate kernel\n");
@@ -30,6 +30,8 @@ static unsigned long get_entry_num_slots(efi_memory_desc_t *md,

     region_end = min(md->phys_addr + md->num_pages * EFI_PAGE_SIZE - 1,
              (u64)ULONG_MAX);
+    if (region_end < size)
+        return 0;

     first_slot = round_up(md->phys_addr, align);
     last_slot = round_down(region_end - size + 1, align);
@@ -299,6 +299,9 @@ int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
                   ip->major, ip->minor,
                   ip->revision);

+            if (le16_to_cpu(ip->hw_id) == VCN_HWID)
+                adev->vcn.num_vcn_inst++;
+
             for (k = 0; k < num_base_address; k++) {
                 /*
                  * convert the endianness of base addresses in place,

@@ -377,7 +380,7 @@ void amdgpu_discovery_harvest_ip(struct amdgpu_device *adev)
 {
     struct binary_header *bhdr;
     struct harvest_table *harvest_info;
-    int i;
+    int i, vcn_harvest_count = 0;

     bhdr = (struct binary_header *)adev->mman.discovery_bin;
     harvest_info = (struct harvest_table *)(adev->mman.discovery_bin +

@@ -389,8 +392,7 @@ void amdgpu_discovery_harvest_ip(struct amdgpu_device *adev)

         switch (le32_to_cpu(harvest_info->list[i].hw_id)) {
         case VCN_HWID:
-            adev->harvest_ip_mask |= AMD_HARVEST_IP_VCN_MASK;
-            adev->harvest_ip_mask |= AMD_HARVEST_IP_JPEG_MASK;
+            vcn_harvest_count++;
             break;
         case DMU_HWID:
             adev->harvest_ip_mask |= AMD_HARVEST_IP_DMU_MASK;

@@ -399,6 +401,10 @@ void amdgpu_discovery_harvest_ip(struct amdgpu_device *adev)
             break;
         }
     }
+    if (vcn_harvest_count == adev->vcn.num_vcn_inst) {
+        adev->harvest_ip_mask |= AMD_HARVEST_IP_VCN_MASK;
+        adev->harvest_ip_mask |= AMD_HARVEST_IP_JPEG_MASK;
+    }
 }

 int amdgpu_discovery_get_gfx_info(struct amdgpu_device *adev)
@@ -1537,6 +1537,8 @@ static int amdgpu_pmops_runtime_suspend(struct device *dev)
         pci_ignore_hotplug(pdev);
         pci_set_power_state(pdev, PCI_D3cold);
         drm_dev->switch_power_state = DRM_SWITCH_POWER_DYNAMIC_OFF;
+    } else if (amdgpu_device_supports_boco(drm_dev)) {
+        /* nothing to do */
     } else if (amdgpu_device_supports_baco(drm_dev)) {
         amdgpu_device_baco_enter(drm_dev);
     }
@@ -9410,7 +9410,12 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
         } else if (amdgpu_freesync_vid_mode && aconnector &&
                is_freesync_video_mode(&new_crtc_state->mode,
                           aconnector)) {
-            set_freesync_fixed_config(dm_new_crtc_state);
+            struct drm_display_mode *high_mode;
+
+            high_mode = get_highest_refresh_rate_mode(aconnector, false);
+            if (!drm_mode_equal(&new_crtc_state->mode, high_mode)) {
+                set_freesync_fixed_config(dm_new_crtc_state);
+            }
         }

         ret = dm_atomic_get_state(state, &dm_state);
@@ -584,7 +584,7 @@ static void amdgpu_dm_irq_schedule_work(struct amdgpu_device *adev,
     handler_data = container_of(handler_list->next, struct amdgpu_dm_irq_handler_data, list);

     /*allocate a new amdgpu_dm_irq_handler_data*/
-    handler_data_add = kzalloc(sizeof(*handler_data), GFP_KERNEL);
+    handler_data_add = kzalloc(sizeof(*handler_data), GFP_ATOMIC);
     if (!handler_data_add) {
         DRM_ERROR("DM_IRQ: failed to allocate irq handler!\n");
         return;
@@ -1788,7 +1788,6 @@ static bool dcn30_split_stream_for_mpc_or_odm(
         }
         pri_pipe->next_odm_pipe = sec_pipe;
         sec_pipe->prev_odm_pipe = pri_pipe;
-        ASSERT(sec_pipe->top_pipe == NULL);

         if (!sec_pipe->top_pipe)
             sec_pipe->stream_res.opp = pool->opps[pipe_idx];
@@ -242,7 +242,7 @@ static int vangogh_tables_init(struct smu_context *smu)
     return 0;

 err3_out:
-    kfree(smu_table->clocks_table);
+    kfree(smu_table->watermarks_table);
 err2_out:
     kfree(smu_table->gpu_metrics_table);
 err1_out:
@@ -5424,16 +5424,18 @@ static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state)

     switch (crtc_state->pipe_bpp) {
     case 18:
-        val |= PIPEMISC_DITHER_6_BPC;
+        val |= PIPEMISC_6_BPC;
         break;
     case 24:
-        val |= PIPEMISC_DITHER_8_BPC;
+        val |= PIPEMISC_8_BPC;
         break;
     case 30:
-        val |= PIPEMISC_DITHER_10_BPC;
+        val |= PIPEMISC_10_BPC;
         break;
     case 36:
-        val |= PIPEMISC_DITHER_12_BPC;
+        /* Port output 12BPC defined for ADLP+ */
+        if (DISPLAY_VER(dev_priv) > 12)
+            val |= PIPEMISC_12_BPC_ADLP;
         break;
     default:
         MISSING_CASE(crtc_state->pipe_bpp);

@@ -5469,15 +5471,27 @@ int bdw_get_pipemisc_bpp(struct intel_crtc *crtc)

     tmp = intel_de_read(dev_priv, PIPEMISC(crtc->pipe));

-    switch (tmp & PIPEMISC_DITHER_BPC_MASK) {
-    case PIPEMISC_DITHER_6_BPC:
+    switch (tmp & PIPEMISC_BPC_MASK) {
+    case PIPEMISC_6_BPC:
         return 18;
-    case PIPEMISC_DITHER_8_BPC:
+    case PIPEMISC_8_BPC:
         return 24;
-    case PIPEMISC_DITHER_10_BPC:
+    case PIPEMISC_10_BPC:
         return 30;
-    case PIPEMISC_DITHER_12_BPC:
-        return 36;
+    /*
+     * PORT OUTPUT 12 BPC defined for ADLP+.
+     *
+     * TODO:
+     * For previous platforms with DSI interface, bits 5:7
+     * are used for storing pipe_bpp irrespective of dithering.
+     * Since the value of 12 BPC is not defined for these bits
+     * on older platforms, need to find a workaround for 12 BPC
+     * MIPI DSI HW readout.
+     */
+    case PIPEMISC_12_BPC_ADLP:
+        if (DISPLAY_VER(dev_priv) > 12)
+            return 36;
+        fallthrough;
     default:
         MISSING_CASE(tmp);
         return 0;
@@ -3149,6 +3149,7 @@ static int init_bdw_mmio_info(struct intel_gvt *gvt)
     MMIO_DFH(_MMIO(0xb100), D_BDW, F_CMD_ACCESS, NULL, NULL);
     MMIO_DFH(_MMIO(0xb10c), D_BDW, F_CMD_ACCESS, NULL, NULL);
     MMIO_D(_MMIO(0xb110), D_BDW);
+    MMIO_D(GEN9_SCRATCH_LNCF1, D_BDW_PLUS);

     MMIO_F(_MMIO(0x24d0), 48, F_CMD_ACCESS | F_CMD_WRITE_PATCH, 0, 0,
         D_BDW_PLUS, NULL, force_nonpriv_write);
@@ -105,6 +105,8 @@ static struct engine_mmio gen9_engine_mmio_list[] __cacheline_aligned = {
     {RCS0, COMMON_SLICE_CHICKEN2, 0xffff, true}, /* 0x7014 */
     {RCS0, GEN9_CS_DEBUG_MODE1, 0xffff, false}, /* 0x20ec */
     {RCS0, GEN8_L3SQCREG4, 0, false}, /* 0xb118 */
+    {RCS0, GEN9_SCRATCH1, 0, false}, /* 0xb11c */
+    {RCS0, GEN9_SCRATCH_LNCF1, 0, false}, /* 0xb008 */
     {RCS0, GEN7_HALF_SLICE_CHICKEN1, 0xffff, true}, /* 0xe100 */
     {RCS0, HALF_SLICE_CHICKEN2, 0xffff, true}, /* 0xe180 */
     {RCS0, HALF_SLICE_CHICKEN3, 0xffff, true}, /* 0xe184 */
@@ -728,9 +728,18 @@ static void err_print_gt(struct drm_i915_error_state_buf *m,
     if (INTEL_GEN(m->i915) >= 12) {
         int i;

-        for (i = 0; i < GEN12_SFC_DONE_MAX; i++)
+        for (i = 0; i < GEN12_SFC_DONE_MAX; i++) {
+            /*
+             * SFC_DONE resides in the VD forcewake domain, so it
+             * only exists if the corresponding VCS engine is
+             * present.
+             */
+            if (!HAS_ENGINE(gt->_gt, _VCS(i * 2)))
+                continue;
+
             err_printf(m, " SFC_DONE[%d]: 0x%08x\n", i,
                    gt->sfc_done[i]);
+        }

         err_printf(m, " GAM_DONE: 0x%08x\n", gt->gam_done);
     }

@@ -1586,6 +1595,14 @@ static void gt_record_regs(struct intel_gt_coredump *gt)

     if (INTEL_GEN(i915) >= 12) {
         for (i = 0; i < GEN12_SFC_DONE_MAX; i++) {
+            /*
+             * SFC_DONE resides in the VD forcewake domain, so it
+             * only exists if the corresponding VCS engine is
+             * present.
+             */
+            if (!HAS_ENGINE(gt->_gt, _VCS(i * 2)))
+                continue;
+
             gt->sfc_done[i] =
                 intel_uncore_read(uncore, GEN12_SFC_DONE(i));
         }
@@ -6134,11 +6134,17 @@ enum {
 #define   PIPEMISC_HDR_MODE_PRECISION (1 << 23) /* icl+ */
 #define   PIPEMISC_OUTPUT_COLORSPACE_YUV (1 << 11)
 #define   PIPEMISC_PIXEL_ROUNDING_TRUNC REG_BIT(8) /* tgl+ */
-#define   PIPEMISC_DITHER_BPC_MASK (7 << 5)
-#define   PIPEMISC_DITHER_8_BPC (0 << 5)
-#define   PIPEMISC_DITHER_10_BPC (1 << 5)
-#define   PIPEMISC_DITHER_6_BPC (2 << 5)
-#define   PIPEMISC_DITHER_12_BPC (3 << 5)
+/*
+ * For Display < 13, Bits 5-7 of PIPE MISC represent DITHER BPC with
+ * valid values of: 6, 8, 10 BPC.
+ * ADLP+, the bits 5-7 represent PORT OUTPUT BPC with valid values of:
+ * 6, 8, 10, 12 BPC.
+ */
+#define   PIPEMISC_BPC_MASK (7 << 5)
+#define   PIPEMISC_8_BPC (0 << 5)
+#define   PIPEMISC_10_BPC (1 << 5)
+#define   PIPEMISC_6_BPC (2 << 5)
+#define   PIPEMISC_12_BPC_ADLP (4 << 5) /* adlp+ */
 #define   PIPEMISC_DITHER_ENABLE (1 << 4)
 #define   PIPEMISC_DITHER_TYPE_MASK (3 << 2)
 #define   PIPEMISC_DITHER_TYPE_SP (0 << 2)
@@ -532,13 +532,10 @@ void mtk_drm_crtc_async_update(struct drm_crtc *crtc, struct drm_plane *plane,
                    struct drm_atomic_state *state)
 {
     struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
-    const struct drm_plane_helper_funcs *plane_helper_funcs =
-            plane->helper_private;

     if (!mtk_crtc->enabled)
         return;

-    plane_helper_funcs->atomic_update(plane, state);
     mtk_drm_crtc_update_config(mtk_crtc, false);
 }

@@ -110,6 +110,35 @@ static int mtk_plane_atomic_async_check(struct drm_plane *plane,
                            true, true);
 }

+static void mtk_plane_update_new_state(struct drm_plane_state *new_state,
+                       struct mtk_plane_state *mtk_plane_state)
+{
+    struct drm_framebuffer *fb = new_state->fb;
+    struct drm_gem_object *gem;
+    struct mtk_drm_gem_obj *mtk_gem;
+    unsigned int pitch, format;
+    dma_addr_t addr;
+
+    gem = fb->obj[0];
+    mtk_gem = to_mtk_gem_obj(gem);
+    addr = mtk_gem->dma_addr;
+    pitch = fb->pitches[0];
+    format = fb->format->format;
+
+    addr += (new_state->src.x1 >> 16) * fb->format->cpp[0];
+    addr += (new_state->src.y1 >> 16) * pitch;
+
+    mtk_plane_state->pending.enable = true;
+    mtk_plane_state->pending.pitch = pitch;
+    mtk_plane_state->pending.format = format;
+    mtk_plane_state->pending.addr = addr;
+    mtk_plane_state->pending.x = new_state->dst.x1;
+    mtk_plane_state->pending.y = new_state->dst.y1;
+    mtk_plane_state->pending.width = drm_rect_width(&new_state->dst);
+    mtk_plane_state->pending.height = drm_rect_height(&new_state->dst);
+    mtk_plane_state->pending.rotation = new_state->rotation;
+}
+
 static void mtk_plane_atomic_async_update(struct drm_plane *plane,
                       struct drm_atomic_state *state)
 {

@@ -126,8 +155,10 @@ static void mtk_plane_atomic_async_update(struct drm_plane *plane,
     plane->state->src_h = new_state->src_h;
     plane->state->src_w = new_state->src_w;
     swap(plane->state->fb, new_state->fb);
-    new_plane_state->pending.async_dirty = true;

+    mtk_plane_update_new_state(new_state, new_plane_state);
+    wmb(); /* Make sure the above parameters are set before update */
+    new_plane_state->pending.async_dirty = true;
     mtk_drm_crtc_async_update(new_state->crtc, plane, state);
 }

@@ -189,14 +220,8 @@ static void mtk_plane_atomic_update(struct drm_plane *plane,
     struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
                                        plane);
     struct mtk_plane_state *mtk_plane_state = to_mtk_plane_state(new_state);
-    struct drm_crtc *crtc = new_state->crtc;
-    struct drm_framebuffer *fb = new_state->fb;
-    struct drm_gem_object *gem;
-    struct mtk_drm_gem_obj *mtk_gem;
-    unsigned int pitch, format;
-    dma_addr_t addr;

-    if (!crtc || WARN_ON(!fb))
+    if (!new_state->crtc || WARN_ON(!new_state->fb))
         return;

     if (!new_state->visible) {

@@ -204,24 +229,7 @@ static void mtk_plane_atomic_update(struct drm_plane *plane,
         return;
     }

-    gem = fb->obj[0];
-    mtk_gem = to_mtk_gem_obj(gem);
-    addr = mtk_gem->dma_addr;
-    pitch = fb->pitches[0];
-    format = fb->format->format;
-
-    addr += (new_state->src.x1 >> 16) * fb->format->cpp[0];
-    addr += (new_state->src.y1 >> 16) * pitch;
-
-    mtk_plane_state->pending.enable = true;
-    mtk_plane_state->pending.pitch = pitch;
-    mtk_plane_state->pending.format = format;
-    mtk_plane_state->pending.addr = addr;
-    mtk_plane_state->pending.x = new_state->dst.x1;
-    mtk_plane_state->pending.y = new_state->dst.y1;
-    mtk_plane_state->pending.width = drm_rect_width(&new_state->dst);
-    mtk_plane_state->pending.height = drm_rect_height(&new_state->dst);
-    mtk_plane_state->pending.rotation = new_state->rotation;
+    mtk_plane_update_new_state(new_state, mtk_plane_state);
     wmb(); /* Make sure the above parameters are set before update */
     mtk_plane_state->pending.dirty = true;
 }
@@ -634,6 +634,11 @@
 #define VPP_WRAP_OSD3_MATRIX_PRE_OFFSET2 0x3dbc
 #define VPP_WRAP_OSD3_MATRIX_EN_CTRL 0x3dbd

+/* osd1 HDR */
+#define OSD1_HDR2_CTRL 0x38a0
+#define OSD1_HDR2_CTRL_VDIN0_HDR2_TOP_EN BIT(13)
+#define OSD1_HDR2_CTRL_REG_ONLY_MAT BIT(16)
+
 /* osd2 scaler */
 #define OSD2_VSC_PHASE_STEP 0x3d00
 #define OSD2_VSC_INI_PHASE 0x3d01
@@ -425,9 +425,14 @@ void meson_viu_init(struct meson_drm *priv)
     if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXM) ||
         meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXL))
         meson_viu_load_matrix(priv);
-    else if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A))
+    else if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A)) {
         meson_viu_set_g12a_osd1_matrix(priv, RGB709_to_YUV709l_coeff,
                            true);
+        /* fix green/pink color distortion from vendor u-boot */
+        writel_bits_relaxed(OSD1_HDR2_CTRL_REG_ONLY_MAT |
+                OSD1_HDR2_CTRL_VDIN0_HDR2_TOP_EN, 0,
+                priv->io_base + _REG(OSD1_HDR2_CTRL));
+    }

     /* Initialize OSD1 fifo control register */
     reg = VIU_OSD_DDR_PRIORITY_URGENT |
@ -1224,14 +1224,14 @@ static int bcm_iproc_i2c_unreg_slave(struct i2c_client *slave)
|
|||
|
||||
disable_irq(iproc_i2c->irq);
|
||||
|
||||
tasklet_kill(&iproc_i2c->slave_rx_tasklet);
|
||||
|
||||
/* disable all slave interrupts */
|
||||
tmp = iproc_i2c_rd_reg(iproc_i2c, IE_OFFSET);
|
||||
tmp &= ~(IE_S_ALL_INTERRUPT_MASK <<
|
||||
IE_S_ALL_INTERRUPT_SHIFT);
|
||||
iproc_i2c_wr_reg(iproc_i2c, IE_OFFSET, tmp);
|
||||
|
||||
tasklet_kill(&iproc_i2c->slave_rx_tasklet);
|
||||
|
||||
/* Erase the slave address programmed */
|
||||
tmp = iproc_i2c_rd_reg(iproc_i2c, S_CFG_SMBUS_ADDR_OFFSET);
|
||||
tmp &= ~BIT(S_CFG_EN_NIC_SMB_ADDR3_SHIFT);
|
||||
|
|
|
@ -141,7 +141,7 @@ static ssize_t i2cdev_read(struct file *file, char __user *buf, size_t count,
|
|||
if (count > 8192)
|
||||
count = 8192;
|
||||
|
||||
tmp = kmalloc(count, GFP_KERNEL);
|
||||
tmp = kzalloc(count, GFP_KERNEL);
|
||||
if (tmp == NULL)
|
||||
return -ENOMEM;
|
||||
|
||||
|
@ -150,7 +150,8 @@ static ssize_t i2cdev_read(struct file *file, char __user *buf, size_t count,
|
|||
|
||||
ret = i2c_master_recv(client, tmp, count);
|
||||
if (ret >= 0)
|
||||
ret = copy_to_user(buf, tmp, count) ? -EFAULT : ret;
|
||||
if (copy_to_user(buf, tmp, ret))
|
||||
ret = -EFAULT;
|
||||
kfree(tmp);
|
||||
return ret;
|
||||
}
|
||||
|
|
|
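The i2cdev_read() hunk above combines two hardening fixes: the bounce buffer is zero-initialized (kmalloc becomes kzalloc), and only the bytes the transfer actually returned are copied out instead of the full requested count. A minimal userspace sketch of the same pattern follows; stub_recv() and dev_read() are hypothetical stand-ins, not kernel API.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Stand-in for i2c_master_recv(): returns bytes received or -errno. */
static long stub_recv(char *buf, size_t count)
{
	const char sample[] = "i2c";
	size_t n = count < sizeof(sample) ? count : sizeof(sample);

	memcpy(buf, sample, n);
	return (long)n;
}

/* Mirror of the fixed i2cdev_read() flow: zeroed bounce buffer, and
 * only 'ret' bytes (what the transfer produced) are copied to the
 * caller, so stale heap data cannot leak on a short read. */
static long dev_read(char *dst, size_t count)
{
	char *tmp;
	long ret;

	if (count > 8192)
		count = 8192;

	tmp = calloc(1, count);		/* kzalloc() analogue */
	if (!tmp)
		return -ENOMEM;

	ret = stub_recv(tmp, count);
	if (ret >= 0)
		memcpy(dst, tmp, (size_t)ret);	/* copy only 'ret' bytes */

	free(tmp);
	return ret;
}

int main(void)
{
	char buf[16] = { 0 };
	long n = dev_read(buf, sizeof(buf));

	printf("read %ld bytes\n", n);
	return n < 0;
}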
@@ -664,8 +664,8 @@ static int palmas_adc_wakeup_configure(struct palmas_gpadc *adc)

adc_period = adc->auto_conversion_period;
for (i = 0; i < 16; ++i) {
if (((1000 * (1 << i)) / 32) < adc_period)
continue;
if (((1000 * (1 << i)) / 32) >= adc_period)
break;
}
if (i > 0)
i--;

@@ -568,7 +568,6 @@ static int ti_ads7950_probe(struct spi_device *spi)
st->ring_xfer.tx_buf = &st->tx_buf[0];
st->ring_xfer.rx_buf = &st->rx_buf[0];
/* len will be set later */
st->ring_xfer.cs_change = true;

spi_message_add_tail(&st->ring_xfer, &st->ring_msg);

@@ -25,6 +25,8 @@
#include <linux/iio/trigger_consumer.h>
#include <linux/iio/triggered_buffer.h>

#include <linux/time.h>

#define HDC100X_REG_TEMP 0x00
#define HDC100X_REG_HUMIDITY 0x01

@@ -166,7 +168,7 @@ static int hdc100x_get_measurement(struct hdc100x_data *data,
struct iio_chan_spec const *chan)
{
struct i2c_client *client = data->client;
int delay = data->adc_int_us[chan->address];
int delay = data->adc_int_us[chan->address] + 1*USEC_PER_MSEC;
int ret;
__be16 val;

@@ -316,7 +318,7 @@ static irqreturn_t hdc100x_trigger_handler(int irq, void *p)
struct iio_dev *indio_dev = pf->indio_dev;
struct hdc100x_data *data = iio_priv(indio_dev);
struct i2c_client *client = data->client;
int delay = data->adc_int_us[0] + data->adc_int_us[1];
int delay = data->adc_int_us[0] + data->adc_int_us[1] + 2*USEC_PER_MSEC;
int ret;

/* dual read starts at temp register */

@@ -415,12 +415,11 @@ int __adis_initial_startup(struct adis *adis)
int ret;

/* check if the device has rst pin low */
gpio = devm_gpiod_get_optional(&adis->spi->dev, "reset", GPIOD_ASIS);
gpio = devm_gpiod_get_optional(&adis->spi->dev, "reset", GPIOD_OUT_HIGH);
if (IS_ERR(gpio))
return PTR_ERR(gpio);

if (gpio) {
gpiod_set_value_cansleep(gpio, 1);
msleep(10);
/* bring device out of reset */
gpiod_set_value_cansleep(gpio, 0);

@@ -941,7 +941,6 @@ int mlx5_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
u32 *cqb = NULL;
void *cqc;
int cqe_size;
unsigned int irqn;
int eqn;
int err;

@@ -980,7 +979,7 @@ int mlx5_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
INIT_WORK(&cq->notify_work, notify_soft_wc_handler);
}

err = mlx5_vector2eqn(dev->mdev, vector, &eqn, &irqn);
err = mlx5_vector2eqn(dev->mdev, vector, &eqn);
if (err)
goto err_cqb;

@@ -1003,7 +1002,6 @@ int mlx5_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
goto err_cqb;

mlx5_ib_dbg(dev, "cqn 0x%x\n", cq->mcq.cqn);
cq->mcq.irqn = irqn;
if (udata)
cq->mcq.tasklet_ctx.comp = mlx5_ib_cq_comp;
else

@@ -975,7 +975,6 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_QUERY_EQN)(
struct mlx5_ib_dev *dev;
int user_vector;
int dev_eqn;
unsigned int irqn;
int err;

if (uverbs_copy_from(&user_vector, attrs,

@@ -987,7 +986,7 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_QUERY_EQN)(
return PTR_ERR(c);
dev = to_mdev(c->ibucontext.device);

err = mlx5_vector2eqn(dev->mdev, user_vector, &dev_eqn, &irqn);
err = mlx5_vector2eqn(dev->mdev, user_vector, &dev_eqn);
if (err < 0)
return err;

@@ -71,12 +71,18 @@ static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
family = AF_INET6;

if (bareudp->ethertype == htons(ETH_P_IP)) {
struct iphdr *iphdr;
__u8 ipversion;

iphdr = (struct iphdr *)(skb->data + BAREUDP_BASE_HLEN);
if (iphdr->version == 4) {
proto = bareudp->ethertype;
} else if (bareudp->multi_proto_mode && (iphdr->version == 6)) {
if (skb_copy_bits(skb, BAREUDP_BASE_HLEN, &ipversion,
sizeof(ipversion))) {
bareudp->dev->stats.rx_dropped++;
goto drop;
}
ipversion >>= 4;

if (ipversion == 4) {
proto = htons(ETH_P_IP);
} else if (ipversion == 6 && bareudp->multi_proto_mode) {
proto = htons(ETH_P_IPV6);
} else {
bareudp->dev->stats.rx_dropped++;

@@ -912,6 +912,7 @@ static int hellcreek_fdb_dump(struct dsa_switch *ds, int port,
{
struct hellcreek *hellcreek = ds->priv;
u16 entries;
int ret = 0;
size_t i;

mutex_lock(&hellcreek->reg_lock);

@@ -944,12 +945,14 @@ static int hellcreek_fdb_dump(struct dsa_switch *ds, int port,
if (!(entry.portmask & BIT(port)))
continue;

cb(entry.mac, 0, entry.is_static, data);
ret = cb(entry.mac, 0, entry.is_static, data);
if (ret)
break;
}

mutex_unlock(&hellcreek->reg_lock);

return 0;
return ret;
}

static int hellcreek_vlan_filtering(struct dsa_switch *ds, int port,

@@ -557,12 +557,12 @@ static int lan9303_alr_make_entry_raw(struct lan9303 *chip, u32 dat0, u32 dat1)
return 0;
}

typedef void alr_loop_cb_t(struct lan9303 *chip, u32 dat0, u32 dat1,
int portmap, void *ctx);
typedef int alr_loop_cb_t(struct lan9303 *chip, u32 dat0, u32 dat1,
int portmap, void *ctx);

static void lan9303_alr_loop(struct lan9303 *chip, alr_loop_cb_t *cb, void *ctx)
static int lan9303_alr_loop(struct lan9303 *chip, alr_loop_cb_t *cb, void *ctx)
{
int i;
int ret = 0, i;

mutex_lock(&chip->alr_mutex);
lan9303_write_switch_reg(chip, LAN9303_SWE_ALR_CMD,

@@ -582,13 +582,17 @@ static void lan9303_alr_loop(struct lan9303 *chip, alr_loop_cb_t *cb, void *ctx)
LAN9303_ALR_DAT1_PORT_BITOFFS;
portmap = alrport_2_portmap[alrport];

cb(chip, dat0, dat1, portmap, ctx);
ret = cb(chip, dat0, dat1, portmap, ctx);
if (ret)
break;

lan9303_write_switch_reg(chip, LAN9303_SWE_ALR_CMD,
LAN9303_ALR_CMD_GET_NEXT);
lan9303_write_switch_reg(chip, LAN9303_SWE_ALR_CMD, 0);
}
mutex_unlock(&chip->alr_mutex);

return ret;
}

static void alr_reg_to_mac(u32 dat0, u32 dat1, u8 mac[6])

@@ -606,18 +610,20 @@ struct del_port_learned_ctx {
};

/* Clear learned (non-static) entry on given port */
static void alr_loop_cb_del_port_learned(struct lan9303 *chip, u32 dat0,
u32 dat1, int portmap, void *ctx)
static int alr_loop_cb_del_port_learned(struct lan9303 *chip, u32 dat0,
u32 dat1, int portmap, void *ctx)
{
struct del_port_learned_ctx *del_ctx = ctx;
int port = del_ctx->port;

if (((BIT(port) & portmap) == 0) || (dat1 & LAN9303_ALR_DAT1_STATIC))
return;
return 0;

/* learned entries has only one port, we can just delete */
dat1 &= ~LAN9303_ALR_DAT1_VALID; /* delete entry */
lan9303_alr_make_entry_raw(chip, dat0, dat1);

return 0;
}

struct port_fdb_dump_ctx {

@@ -626,19 +632,19 @@ struct port_fdb_dump_ctx {
dsa_fdb_dump_cb_t *cb;
};

static void alr_loop_cb_fdb_port_dump(struct lan9303 *chip, u32 dat0,
u32 dat1, int portmap, void *ctx)
static int alr_loop_cb_fdb_port_dump(struct lan9303 *chip, u32 dat0,
u32 dat1, int portmap, void *ctx)
{
struct port_fdb_dump_ctx *dump_ctx = ctx;
u8 mac[ETH_ALEN];
bool is_static;

if ((BIT(dump_ctx->port) & portmap) == 0)
return;
return 0;

alr_reg_to_mac(dat0, dat1, mac);
is_static = !!(dat1 & LAN9303_ALR_DAT1_STATIC);
dump_ctx->cb(mac, 0, is_static, dump_ctx->data);
return dump_ctx->cb(mac, 0, is_static, dump_ctx->data);
}

/* Set a static ALR entry. Delete entry if port_map is zero */

@@ -1210,9 +1216,7 @@ static int lan9303_port_fdb_dump(struct dsa_switch *ds, int port,
};

dev_dbg(chip->dev, "%s(%d)\n", __func__, port);
lan9303_alr_loop(chip, alr_loop_cb_fdb_port_dump, &dump_ctx);

return 0;
return lan9303_alr_loop(chip, alr_loop_cb_fdb_port_dump, &dump_ctx);
}

static int lan9303_port_mdb_prepare(struct dsa_switch *ds, int port,

@@ -1404,11 +1404,17 @@ static int gswip_port_fdb_dump(struct dsa_switch *ds, int port,
addr[1] = mac_bridge.key[2] & 0xff;
addr[0] = (mac_bridge.key[2] >> 8) & 0xff;
if (mac_bridge.val[1] & GSWIP_TABLE_MAC_BRIDGE_STATIC) {
if (mac_bridge.val[0] & BIT(port))
cb(addr, 0, true, data);
if (mac_bridge.val[0] & BIT(port)) {
err = cb(addr, 0, true, data);
if (err)
return err;
}
} else {
if (((mac_bridge.val[0] & GENMASK(7, 4)) >> 4) == port)
cb(addr, 0, false, data);
if (((mac_bridge.val[0] & GENMASK(7, 4)) >> 4) == port) {
err = cb(addr, 0, false, data);
if (err)
return err;
}
}
}
return 0;
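Several of the DSA hunks above (hellcreek, lan9303, gswip, and later sja1105) apply one recurring fix: an FDB dump loop must propagate the dump callback's return value instead of discarding it, stopping on the first error. A self-contained sketch of that pattern, assuming hypothetical names in place of dsa_fdb_dump_cb_t and the driver-specific table walk:

#include <stdio.h>

/* Hypothetical callback type mirroring dsa_fdb_dump_cb_t: a non-zero
 * return asks the walker to stop and report the error upward. */
typedef int (*dump_cb_t)(const unsigned char *mac, int is_static, void *ctx);

struct entry { unsigned char mac[6]; int is_static; };

/* Walk a table and return the first callback error instead of
 * silently ignoring it (the bug fixed in the drivers above). */
static int table_dump(const struct entry *tbl, int n, dump_cb_t cb, void *ctx)
{
	int ret = 0, i;

	for (i = 0; i < n; i++) {
		ret = cb(tbl[i].mac, tbl[i].is_static, ctx);
		if (ret)
			break;	/* stop on first error, report it */
	}
	return ret;
}

static int print_cb(const unsigned char *mac, int is_static, void *ctx)
{
	(void)ctx;
	printf("%02x:%02x:%02x:%02x:%02x:%02x %s\n",
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5],
	       is_static ? "static" : "learned");
	return 0;	/* 0 = continue walking */
}

int main(void)
{
	struct entry tbl[] = {
		{ { 0x02, 0, 0, 0, 0, 1 }, 1 },
		{ { 0x02, 0, 0, 0, 0, 2 }, 0 },
	};

	return table_dump(tbl, 2, print_cb, NULL);
}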
@@ -684,8 +684,8 @@ static void ksz8_r_vlan_entries(struct ksz_device *dev, u16 addr)
shifts = ksz8->shifts;

ksz8_r_table(dev, TABLE_VLAN, addr, &data);
addr *= dev->phy_port_cnt;
for (i = 0; i < dev->phy_port_cnt; i++) {
addr *= 4;
for (i = 0; i < 4; i++) {
dev->vlan_cache[addr + i].table[0] = (u16)data;
data >>= shifts[VLAN_TABLE];
}

@@ -699,7 +699,7 @@ static void ksz8_r_vlan_table(struct ksz_device *dev, u16 vid, u16 *vlan)
u64 buf;

data = (u16 *)&buf;
addr = vid / dev->phy_port_cnt;
addr = vid / 4;
index = vid & 3;
ksz8_r_table(dev, TABLE_VLAN, addr, &buf);
*vlan = data[index];

@@ -713,7 +713,7 @@ static void ksz8_w_vlan_table(struct ksz_device *dev, u16 vid, u16 vlan)
u64 buf;

data = (u16 *)&buf;
addr = vid / dev->phy_port_cnt;
addr = vid / 4;
index = vid & 3;
ksz8_r_table(dev, TABLE_VLAN, addr, &buf);
data[index] = vlan;

@@ -1078,24 +1078,67 @@ static int ksz8_port_vlan_filtering(struct dsa_switch *ds, int port, bool flag,
if (ksz_is_ksz88x3(dev))
return -ENOTSUPP;

/* Discard packets with VID not enabled on the switch */
ksz_cfg(dev, S_MIRROR_CTRL, SW_VLAN_ENABLE, flag);

/* Discard packets with VID not enabled on the ingress port */
for (port = 0; port < dev->phy_port_cnt; ++port)
ksz_port_cfg(dev, port, REG_PORT_CTRL_2, PORT_INGRESS_FILTER,
flag);

return 0;
}

static void ksz8_port_enable_pvid(struct ksz_device *dev, int port, bool state)
{
if (ksz_is_ksz88x3(dev)) {
ksz_cfg(dev, REG_SW_INSERT_SRC_PVID,
0x03 << (4 - 2 * port), state);
} else {
ksz_pwrite8(dev, port, REG_PORT_CTRL_12, state ? 0x0f : 0x00);
}
}

static int ksz8_port_vlan_add(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_vlan *vlan,
struct netlink_ext_ack *extack)
{
bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
struct ksz_device *dev = ds->priv;
struct ksz_port *p = &dev->ports[port];
u16 data, new_pvid = 0;
u8 fid, member, valid;

if (ksz_is_ksz88x3(dev))
return -ENOTSUPP;

ksz_port_cfg(dev, port, P_TAG_CTRL, PORT_REMOVE_TAG, untagged);
/* If a VLAN is added with untagged flag different from the
* port's Remove Tag flag, we need to change the latter.
* Ignore VID 0, which is always untagged.
* Ignore CPU port, which will always be tagged.
*/
if (untagged != p->remove_tag && vlan->vid != 0 &&
port != dev->cpu_port) {
unsigned int vid;

/* Reject attempts to add a VLAN that requires the
* Remove Tag flag to be changed, unless there are no
* other VLANs currently configured.
*/
for (vid = 1; vid < dev->num_vlans; ++vid) {
/* Skip the VID we are going to add or reconfigure */
if (vid == vlan->vid)
continue;

ksz8_from_vlan(dev, dev->vlan_cache[vid].table[0],
&fid, &member, &valid);
if (valid && (member & BIT(port)))
return -EINVAL;
}

ksz_port_cfg(dev, port, P_TAG_CTRL, PORT_REMOVE_TAG, untagged);
p->remove_tag = untagged;
}

ksz8_r_vlan_table(dev, vlan->vid, &data);
ksz8_from_vlan(dev, data, &fid, &member, &valid);

@@ -1119,9 +1162,11 @@ static int ksz8_port_vlan_add(struct dsa_switch *ds, int port,
u16 vid;

ksz_pread16(dev, port, REG_PORT_CTRL_VID, &vid);
vid &= 0xfff;
vid &= ~VLAN_VID_MASK;
vid |= new_pvid;
ksz_pwrite16(dev, port, REG_PORT_CTRL_VID, vid);

ksz8_port_enable_pvid(dev, port, true);
}

return 0;

@@ -1130,9 +1175,8 @@ static int ksz8_port_vlan_add(struct dsa_switch *ds, int port,
static int ksz8_port_vlan_del(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_vlan *vlan)
{
bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
struct ksz_device *dev = ds->priv;
u16 data, pvid, new_pvid = 0;
u16 data, pvid;
u8 fid, member, valid;

if (ksz_is_ksz88x3(dev))

@@ -1141,8 +1185,6 @@ static int ksz8_port_vlan_del(struct dsa_switch *ds, int port,
ksz_pread16(dev, port, REG_PORT_CTRL_VID, &pvid);
pvid = pvid & 0xFFF;

ksz_port_cfg(dev, port, P_TAG_CTRL, PORT_REMOVE_TAG, untagged);

ksz8_r_vlan_table(dev, vlan->vid, &data);
ksz8_from_vlan(dev, data, &fid, &member, &valid);

@@ -1154,14 +1196,11 @@ static int ksz8_port_vlan_del(struct dsa_switch *ds, int port,
valid = 0;
}

if (pvid == vlan->vid)
new_pvid = 1;

ksz8_to_vlan(dev, fid, member, valid, &data);
ksz8_w_vlan_table(dev, vlan->vid, data);

if (new_pvid != pvid)
ksz_pwrite16(dev, port, REG_PORT_CTRL_VID, pvid);
if (pvid == vlan->vid)
ksz8_port_enable_pvid(dev, port, false);

return 0;
}

@@ -1394,6 +1433,9 @@ static int ksz8_setup(struct dsa_switch *ds)

ksz_cfg(dev, S_MIRROR_CTRL, SW_MIRROR_RX_TX, false);

if (!ksz_is_ksz88x3(dev))
ksz_cfg(dev, REG_SW_CTRL_19, SW_INS_TAG_ENABLE, true);

/* set broadcast storm protection 10% rate */
regmap_update_bits(dev->regmap[1], S_REPLACE_VID_CTRL,
BROADCAST_STORM_RATE,

@@ -1621,6 +1663,16 @@ static int ksz8_switch_init(struct ksz_device *dev)
/* set the real number of ports */
dev->ds->num_ports = dev->port_cnt;

/* We rely on software untagging on the CPU port, so that we
* can support both tagged and untagged VLANs
*/
dev->ds->untag_bridge_pvid = true;

/* VLAN filtering is partly controlled by the global VLAN
* Enable flag
*/
dev->ds->vlan_filtering_is_global = true;

return 0;
}

@@ -631,6 +631,10 @@
#define REG_PORT_4_OUT_RATE_3 0xEE
#define REG_PORT_5_OUT_RATE_3 0xFE

/* 88x3 specific */

#define REG_SW_INSERT_SRC_PVID 0xC2

/* PME */

#define SW_PME_OUTPUT_ENABLE BIT(1)

@@ -27,6 +27,7 @@ struct ksz_port_mib {
struct ksz_port {
u16 member;
u16 vid_member;
bool remove_tag; /* Remove Tag flag set, for ksz8795 only */
int stp_state;
struct phy_device phydev;

@@ -205,12 +206,8 @@ static inline int ksz_read64(struct ksz_device *dev, u32 reg, u64 *val)
int ret;

ret = regmap_bulk_read(dev->regmap[2], reg, value, 2);
if (!ret) {
/* Ick! ToDo: Add 64bit R/W to regmap on 32bit systems */
value[0] = swab32(value[0]);
value[1] = swab32(value[1]);
*val = swab64((u64)*value);
}
if (!ret)
*val = (u64)value[0] << 32 | value[1];

return ret;
}

@@ -46,6 +46,7 @@ static const struct mt7530_mib_desc mt7530_mib[] = {
MIB_DESC(2, 0x48, "TxBytes"),
MIB_DESC(1, 0x60, "RxDrop"),
MIB_DESC(1, 0x64, "RxFiltering"),
MIB_DESC(1, 0x68, "RxUnicast"),
MIB_DESC(1, 0x6c, "RxMulticast"),
MIB_DESC(1, 0x70, "RxBroadcast"),
MIB_DESC(1, 0x74, "RxAlignErr"),

@@ -101,6 +101,23 @@
AR9331_SW_PORT_STATUS_RX_FLOW_EN | AR9331_SW_PORT_STATUS_TX_FLOW_EN | \
AR9331_SW_PORT_STATUS_SPEED_M)

#define AR9331_SW_REG_PORT_CTRL(_port) (0x104 + (_port) * 0x100)
#define AR9331_SW_PORT_CTRL_HEAD_EN BIT(11)
#define AR9331_SW_PORT_CTRL_PORT_STATE GENMASK(2, 0)
#define AR9331_SW_PORT_CTRL_PORT_STATE_DISABLED 0
#define AR9331_SW_PORT_CTRL_PORT_STATE_BLOCKING 1
#define AR9331_SW_PORT_CTRL_PORT_STATE_LISTENING 2
#define AR9331_SW_PORT_CTRL_PORT_STATE_LEARNING 3
#define AR9331_SW_PORT_CTRL_PORT_STATE_FORWARD 4

#define AR9331_SW_REG_PORT_VLAN(_port) (0x108 + (_port) * 0x100)
#define AR9331_SW_PORT_VLAN_8021Q_MODE GENMASK(31, 30)
#define AR9331_SW_8021Q_MODE_SECURE 3
#define AR9331_SW_8021Q_MODE_CHECK 2
#define AR9331_SW_8021Q_MODE_FALLBACK 1
#define AR9331_SW_8021Q_MODE_NONE 0
#define AR9331_SW_PORT_VLAN_PORT_VID_MEMBER GENMASK(25, 16)

/* MIB registers */
#define AR9331_MIB_COUNTER(x) (0x20000 + ((x) * 0x100))

@@ -371,11 +388,59 @@ static int ar9331_sw_mbus_init(struct ar9331_sw_priv *priv)
return 0;
}

static int ar9331_sw_setup_port(struct dsa_switch *ds, int port)
{
struct ar9331_sw_priv *priv = (struct ar9331_sw_priv *)ds->priv;
struct regmap *regmap = priv->regmap;
u32 port_mask, port_ctrl, val;
int ret;

/* Generate default port settings */
port_ctrl = FIELD_PREP(AR9331_SW_PORT_CTRL_PORT_STATE,
AR9331_SW_PORT_CTRL_PORT_STATE_FORWARD);

if (dsa_is_cpu_port(ds, port)) {
/* CPU port should be allowed to communicate with all user
* ports.
*/
port_mask = dsa_user_ports(ds);
/* Enable Atheros header on CPU port. This will allow us
* communicate with each port separately
*/
port_ctrl |= AR9331_SW_PORT_CTRL_HEAD_EN;
} else if (dsa_is_user_port(ds, port)) {
/* User ports should communicate only with the CPU port.
*/
port_mask = BIT(dsa_upstream_port(ds, port));
} else {
/* Other ports do not need to communicate at all */
port_mask = 0;
}

val = FIELD_PREP(AR9331_SW_PORT_VLAN_8021Q_MODE,
AR9331_SW_8021Q_MODE_NONE) |
FIELD_PREP(AR9331_SW_PORT_VLAN_PORT_VID_MEMBER, port_mask);

ret = regmap_write(regmap, AR9331_SW_REG_PORT_VLAN(port), val);
if (ret)
goto error;

ret = regmap_write(regmap, AR9331_SW_REG_PORT_CTRL(port), port_ctrl);
if (ret)
goto error;

return 0;
error:
dev_err(priv->dev, "%s: error: %i\n", __func__, ret);

return ret;
}

static int ar9331_sw_setup(struct dsa_switch *ds)
{
struct ar9331_sw_priv *priv = (struct ar9331_sw_priv *)ds->priv;
struct regmap *regmap = priv->regmap;
int ret;
int ret, i;

ret = ar9331_sw_reset(priv);
if (ret)

@@ -402,6 +467,12 @@ static int ar9331_sw_setup(struct dsa_switch *ds)
if (ret)
goto error;

for (i = 0; i < ds->num_ports; i++) {
ret = ar9331_sw_setup_port(ds, i);
if (ret)
goto error;
}

ds->configure_vlan_while_not_filtering = false;

return 0;

@@ -1625,7 +1625,9 @@ static int sja1105_fdb_dump(struct dsa_switch *ds, int port,
/* We need to hide the dsa_8021q VLANs from the user. */
if (priv->vlan_state == SJA1105_VLAN_UNAWARE)
l2_lookup.vlanid = 0;
cb(macaddr, l2_lookup.vlanid, l2_lookup.lockeds, data);
rc = cb(macaddr, l2_lookup.vlanid, l2_lookup.lockeds, data);
if (rc)
return rc;
}
return 0;
}
@@ -1506,11 +1506,6 @@ static int iavf_reinit_interrupt_scheme(struct iavf_adapter *adapter)
set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);

iavf_map_rings_to_vectors(adapter);

if (RSS_AQ(adapter))
adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_RSS;
else
err = iavf_init_rss(adapter);
err:
return err;
}

@@ -2200,6 +2195,14 @@ continue_reset:
goto reset_err;
}

if (RSS_AQ(adapter)) {
adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_RSS;
} else {
err = iavf_init_rss(adapter);
if (err)
goto reset_err;
}

adapter->aq_required |= IAVF_FLAG_AQ_GET_CONFIG;
adapter->aq_required |= IAVF_FLAG_AQ_MAP_VECTORS;

@@ -226,6 +226,7 @@ enum ice_pf_state {
ICE_VFLR_EVENT_PENDING,
ICE_FLTR_OVERFLOW_PROMISC,
ICE_VF_DIS,
ICE_VF_DEINIT_IN_PROGRESS,
ICE_CFG_BUSY,
ICE_SERVICE_SCHED,
ICE_SERVICE_DIS,

@@ -183,6 +183,14 @@ static int ice_add_mac_to_unsync_list(struct net_device *netdev, const u8 *addr)
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi *vsi = np->vsi;

/* Under some circumstances, we might receive a request to delete our
* own device address from our uc list. Because we store the device
* address in the VSI's MAC filter list, we need to ignore such
* requests and not delete our device address from this list.
*/
if (ether_addr_equal(addr, netdev->dev_addr))
return 0;

if (ice_fltr_add_mac_to_list(vsi, &vsi->tmp_unsync_list, addr,
ICE_FWD_TO_VSI))
return -EINVAL;

@@ -4014,6 +4022,11 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
struct ice_hw *hw;
int i, err;

if (pdev->is_virtfn) {
dev_err(dev, "can't probe a virtual function\n");
return -EINVAL;
}

/* this driver uses devres, see
* Documentation/driver-api/driver-model/devres.rst
*/

@@ -4908,7 +4921,7 @@ static int ice_set_mac_address(struct net_device *netdev, void *pi)
return -EADDRNOTAVAIL;

if (ether_addr_equal(netdev->dev_addr, mac)) {
netdev_warn(netdev, "already using mac %pM\n", mac);
netdev_dbg(netdev, "already using mac %pM\n", mac);
return 0;
}

@@ -4919,6 +4932,7 @@ static int ice_set_mac_address(struct net_device *netdev, void *pi)
return -EBUSY;
}

netif_addr_lock_bh(netdev);
/* Clean up old MAC filter. Not an error if old filter doesn't exist */
status = ice_fltr_remove_mac(vsi, netdev->dev_addr, ICE_FWD_TO_VSI);
if (status && status != ICE_ERR_DOES_NOT_EXIST) {

@@ -4928,30 +4942,28 @@ static int ice_set_mac_address(struct net_device *netdev, void *pi)

/* Add filter for new MAC. If filter exists, return success */
status = ice_fltr_add_mac(vsi, mac, ICE_FWD_TO_VSI);
if (status == ICE_ERR_ALREADY_EXISTS) {
if (status == ICE_ERR_ALREADY_EXISTS)
/* Although this MAC filter is already present in hardware it's
* possible in some cases (e.g. bonding) that dev_addr was
* modified outside of the driver and needs to be restored back
* to this value.
*/
memcpy(netdev->dev_addr, mac, netdev->addr_len);
netdev_dbg(netdev, "filter for MAC %pM already exists\n", mac);
return 0;
}

/* error if the new filter addition failed */
if (status)
else if (status)
/* error if the new filter addition failed */
err = -EADDRNOTAVAIL;

err_update_filters:
if (err) {
netdev_err(netdev, "can't set MAC %pM. filter update failed\n",
mac);
netif_addr_unlock_bh(netdev);
return err;
}

/* change the netdev's MAC address */
memcpy(netdev->dev_addr, mac, netdev->addr_len);
netif_addr_unlock_bh(netdev);
netdev_dbg(vsi->netdev, "updated MAC address to %pM\n",
netdev->dev_addr);

@@ -615,6 +615,8 @@ void ice_free_vfs(struct ice_pf *pf)
struct ice_hw *hw = &pf->hw;
unsigned int tmp, i;

set_bit(ICE_VF_DEINIT_IN_PROGRESS, pf->state);

if (!pf->vf)
return;

@@ -680,6 +682,7 @@ void ice_free_vfs(struct ice_pf *pf)
i);

clear_bit(ICE_VF_DIS, pf->state);
clear_bit(ICE_VF_DEINIT_IN_PROGRESS, pf->state);
clear_bit(ICE_FLAG_SRIOV_ENA, pf->flags);
}

@@ -4292,6 +4295,10 @@ void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event)
struct device *dev;
int err = 0;

/* if de-init is underway, don't process messages from VF */
if (test_bit(ICE_VF_DEINIT_IN_PROGRESS, pf->state))
return;

dev = ice_pf_to_dev(pf);
if (ice_validate_vf_id(pf, vf_id)) {
err = -EINVAL;

@@ -938,7 +938,7 @@ enum mvpp22_ptp_packet_format {
#define MVPP2_BM_COOKIE_POOL_OFFS 8
#define MVPP2_BM_COOKIE_CPU_OFFS 24

#define MVPP2_BM_SHORT_FRAME_SIZE 704 /* frame size 128 */
#define MVPP2_BM_SHORT_FRAME_SIZE 736 /* frame size 128 */
#define MVPP2_BM_LONG_FRAME_SIZE 2240 /* frame size 1664 */
#define MVPP2_BM_JUMBO_FRAME_SIZE 10432 /* frame size 9856 */
/* BM short pool packet size

@@ -134,6 +134,7 @@ int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
cq->cqn);

cq->uar = dev->priv.uar;
cq->irqn = eq->core.irqn;

return 0;

@@ -1019,12 +1019,19 @@ int mlx5_fw_tracer_init(struct mlx5_fw_tracer *tracer)
MLX5_NB_INIT(&tracer->nb, fw_tracer_event, DEVICE_TRACER);
mlx5_eq_notifier_register(dev, &tracer->nb);

mlx5_fw_tracer_start(tracer);

err = mlx5_fw_tracer_start(tracer);
if (err) {
mlx5_core_warn(dev, "FWTracer: Failed to start tracer %d\n", err);
goto err_notifier_unregister;
}
return 0;

err_notifier_unregister:
mlx5_eq_notifier_unregister(dev, &tracer->nb);
mlx5_core_destroy_mkey(dev, &tracer->buff.mkey);
err_dealloc_pd:
mlx5_core_dealloc_pd(dev, tracer->buff.pdn);
cancel_work_sync(&tracer->read_fw_strings_work);
return err;
}

@@ -124,6 +124,11 @@ static int mlx5e_route_lookup_ipv4_get(struct mlx5e_priv *priv,
if (IS_ERR(rt))
return PTR_ERR(rt);

if (rt->rt_type != RTN_UNICAST) {
ret = -ENETUNREACH;
goto err_rt_release;
}

if (mlx5_lag_is_multipath(mdev) && rt->rt_gw_family != AF_INET) {
ret = -ENETUNREACH;
goto err_rt_release;

@@ -1531,15 +1531,9 @@ static int mlx5e_alloc_cq_common(struct mlx5e_priv *priv,
{
struct mlx5_core_dev *mdev = priv->mdev;
struct mlx5_core_cq *mcq = &cq->mcq;
int eqn_not_used;
unsigned int irqn;
int err;
u32 i;

err = mlx5_vector2eqn(mdev, param->eq_ix, &eqn_not_used, &irqn);
if (err)
return err;

err = mlx5_cqwq_create(mdev, &param->wq, param->cqc, &cq->wq,
&cq->wq_ctrl);
if (err)

@@ -1553,7 +1547,6 @@ static int mlx5e_alloc_cq_common(struct mlx5e_priv *priv,
mcq->vector = param->eq_ix;
mcq->comp = mlx5e_completion_event;
mcq->event = mlx5e_cq_error_event;
mcq->irqn = irqn;

for (i = 0; i < mlx5_cqwq_get_size(&cq->wq); i++) {
struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, i);

@@ -1601,11 +1594,10 @@ static int mlx5e_create_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
void *in;
void *cqc;
int inlen;
unsigned int irqn_not_used;
int eqn;
int err;

err = mlx5_vector2eqn(mdev, param->eq_ix, &eqn, &irqn_not_used);
err = mlx5_vector2eqn(mdev, param->eq_ix, &eqn);
if (err)
return err;

@@ -1887,30 +1879,30 @@ static int mlx5e_open_queues(struct mlx5e_channel *c,
if (err)
goto err_close_icosq;

err = mlx5e_open_rxq_rq(c, params, &cparam->rq);
if (err)
goto err_close_sqs;

if (c->xdp) {
err = mlx5e_open_xdpsq(c, params, &cparam->xdp_sq, NULL,
&c->rq_xdpsq, false);
if (err)
goto err_close_sqs;
goto err_close_rq;
}

err = mlx5e_open_rxq_rq(c, params, &cparam->rq);
if (err)
goto err_close_xdp_sq;

err = mlx5e_open_xdpsq(c, params, &cparam->xdp_sq, NULL, &c->xdpsq, true);
if (err)
goto err_close_rq;
goto err_close_xdp_sq;

return 0;

err_close_rq:
mlx5e_close_rq(&c->rq);

err_close_xdp_sq:
if (c->xdp)
mlx5e_close_xdpsq(&c->rq_xdpsq);

err_close_rq:
mlx5e_close_rq(&c->rq);

err_close_sqs:
mlx5e_close_sqs(c);

@@ -1945,9 +1937,9 @@ err_close_async_icosq_cq:
static void mlx5e_close_queues(struct mlx5e_channel *c)
{
mlx5e_close_xdpsq(&c->xdpsq);
mlx5e_close_rq(&c->rq);
if (c->xdp)
mlx5e_close_xdpsq(&c->rq_xdpsq);
mlx5e_close_rq(&c->rq);
mlx5e_close_sqs(c);
mlx5e_close_icosq(&c->icosq);
mlx5e_close_icosq(&c->async_icosq);

@@ -1979,9 +1971,8 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
struct mlx5e_channel *c;
unsigned int irq;
int err;
int eqn;

err = mlx5_vector2eqn(priv->mdev, ix, &eqn, &irq);
err = mlx5_vector2irqn(priv->mdev, ix, &irq);
if (err)
return err;
@@ -871,8 +871,8 @@ clean:
return err;
}

int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn,
unsigned int *irqn)
static int vector2eqnirqn(struct mlx5_core_dev *dev, int vector, int *eqn,
unsigned int *irqn)
{
struct mlx5_eq_table *table = dev->priv.eq_table;
struct mlx5_eq_comp *eq, *n;

@@ -881,8 +881,10 @@ int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn,

list_for_each_entry_safe(eq, n, &table->comp_eqs_list, list) {
if (i++ == vector) {
*eqn = eq->core.eqn;
*irqn = eq->core.irqn;
if (irqn)
*irqn = eq->core.irqn;
if (eqn)
*eqn = eq->core.eqn;
err = 0;
break;
}

@@ -890,8 +892,18 @@ int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn,

return err;
}

int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn)
{
return vector2eqnirqn(dev, vector, eqn, NULL);
}
EXPORT_SYMBOL(mlx5_vector2eqn);

int mlx5_vector2irqn(struct mlx5_core_dev *dev, int vector, unsigned int *irqn)
{
return vector2eqnirqn(dev, vector, NULL, irqn);
}

unsigned int mlx5_comp_vectors_count(struct mlx5_core_dev *dev)
{
return dev->priv.eq_table->num_comp_eqs;

@@ -501,6 +501,7 @@ err_sampler:
err_offload_rule:
mlx5_esw_vporttbl_put(esw, &per_vport_tbl_attr);
err_default_tbl:
kfree(sample_flow);
return ERR_PTR(err);
}

@@ -48,6 +48,7 @@
#include "lib/fs_chains.h"
#include "en_tc.h"
#include "en/mapping.h"
#include "devlink.h"

#define mlx5_esw_for_each_rep(esw, i, rep) \
xa_for_each(&((esw)->offloads.vport_reps), i, rep)

@@ -2984,12 +2985,19 @@ int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
if (cur_mlx5_mode == mlx5_mode)
goto unlock;

if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV)
if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV) {
if (mlx5_devlink_trap_get_num_active(esw->dev)) {
NL_SET_ERR_MSG_MOD(extack,
"Can't change mode while devlink traps are active");
err = -EOPNOTSUPP;
goto unlock;
}
err = esw_offloads_start(esw, extack);
else if (mode == DEVLINK_ESWITCH_MODE_LEGACY)
} else if (mode == DEVLINK_ESWITCH_MODE_LEGACY) {
err = esw_offloads_stop(esw, extack);
else
} else {
err = -EINVAL;
}

unlock:
mlx5_esw_unlock(esw);

@@ -417,7 +417,6 @@ static int mlx5_fpga_conn_create_cq(struct mlx5_fpga_conn *conn, int cq_size)
struct mlx5_wq_param wqp;
struct mlx5_cqe64 *cqe;
int inlen, err, eqn;
unsigned int irqn;
void *cqc, *in;
__be64 *pas;
u32 i;

@@ -446,7 +445,7 @@ static int mlx5_fpga_conn_create_cq(struct mlx5_fpga_conn *conn, int cq_size)
goto err_cqwq;
}

err = mlx5_vector2eqn(mdev, smp_processor_id(), &eqn, &irqn);
err = mlx5_vector2eqn(mdev, smp_processor_id(), &eqn);
if (err) {
kvfree(in);
goto err_cqwq;

@@ -476,7 +475,6 @@ static int mlx5_fpga_conn_create_cq(struct mlx5_fpga_conn *conn, int cq_size)
*conn->cq.mcq.arm_db = 0;
conn->cq.mcq.vector = 0;
conn->cq.mcq.comp = mlx5_fpga_conn_cq_complete;
conn->cq.mcq.irqn = irqn;
conn->cq.mcq.uar = fdev->conn_res.uar;
tasklet_setup(&conn->cq.tasklet, mlx5_fpga_conn_cq_tasklet);

@@ -103,4 +103,6 @@ void mlx5_core_eq_free_irqs(struct mlx5_core_dev *dev);
struct cpu_rmap *mlx5_eq_table_get_rmap(struct mlx5_core_dev *dev);
#endif

int mlx5_vector2irqn(struct mlx5_core_dev *dev, int vector, unsigned int *irqn);

#endif

@@ -1781,16 +1781,14 @@ static int __init init(void)
if (err)
goto err_sf;

#ifdef CONFIG_MLX5_CORE_EN
err = mlx5e_init();
if (err) {
pci_unregister_driver(&mlx5_core_driver);
goto err_debug;
}
#endif
if (err)
goto err_en;

return 0;

err_en:
mlx5_sf_driver_unregister();
err_sf:
pci_unregister_driver(&mlx5_core_driver);
err_debug:

@@ -1800,9 +1798,7 @@ err_debug:

static void __exit cleanup(void)
{
#ifdef CONFIG_MLX5_CORE_EN
mlx5e_cleanup();
#endif
mlx5_sf_driver_unregister();
pci_unregister_driver(&mlx5_core_driver);
mlx5_unregister_debugfs();

@@ -223,8 +223,13 @@ int mlx5_firmware_flash(struct mlx5_core_dev *dev, const struct firmware *fw,
int mlx5_fw_version_query(struct mlx5_core_dev *dev,
u32 *running_ver, u32 *stored_ver);

#ifdef CONFIG_MLX5_CORE_EN
int mlx5e_init(void);
void mlx5e_cleanup(void);
#else
static inline int mlx5e_init(void){ return 0; }
static inline void mlx5e_cleanup(void){}
#endif

static inline bool mlx5_sriov_is_enabled(struct mlx5_core_dev *dev)
{

@@ -749,7 +749,6 @@ static struct mlx5dr_cq *dr_create_cq(struct mlx5_core_dev *mdev,
struct mlx5_cqe64 *cqe;
struct mlx5dr_cq *cq;
int inlen, err, eqn;
unsigned int irqn;
void *cqc, *in;
__be64 *pas;
int vector;

@@ -782,7 +781,7 @@ static struct mlx5dr_cq *dr_create_cq(struct mlx5_core_dev *mdev,
goto err_cqwq;

vector = raw_smp_processor_id() % mlx5_comp_vectors_count(mdev);
err = mlx5_vector2eqn(mdev, vector, &eqn, &irqn);
err = mlx5_vector2eqn(mdev, vector, &eqn);
if (err) {
kvfree(in);
goto err_cqwq;

@@ -818,7 +817,6 @@ static struct mlx5dr_cq *dr_create_cq(struct mlx5_core_dev *mdev,
*cq->mcq.arm_db = cpu_to_be32(2 << 28);

cq->mcq.vector = 0;
cq->mcq.irqn = irqn;
cq->mcq.uar = uar;

return cq;

@@ -352,6 +352,7 @@ static void dr_ste_v0_set_rx_decap(u8 *hw_ste_p)
{
MLX5_SET(ste_rx_steering_mult, hw_ste_p, tunneling_action,
DR_STE_TUNL_ACTION_DECAP);
MLX5_SET(ste_rx_steering_mult, hw_ste_p, fail_on_error, 1);
}

static void dr_ste_v0_set_rx_pop_vlan(u8 *hw_ste_p)

@@ -365,6 +366,7 @@ static void dr_ste_v0_set_rx_decap_l3(u8 *hw_ste_p, bool vlan)
MLX5_SET(ste_rx_steering_mult, hw_ste_p, tunneling_action,
DR_STE_TUNL_ACTION_L3_DECAP);
MLX5_SET(ste_modify_packet, hw_ste_p, action_description, vlan ? 1 : 0);
MLX5_SET(ste_rx_steering_mult, hw_ste_p, fail_on_error, 1);
}

static void dr_ste_v0_set_rewrite_actions(u8 *hw_ste_p, u16 num_of_actions,

@@ -920,7 +920,7 @@ static netdev_tx_t cpsw_ndo_start_xmit(struct sk_buff *skb,
struct cpdma_chan *txch;
int ret, q_idx;

if (skb_padto(skb, CPSW_MIN_PACKET_SIZE)) {
if (skb_put_padto(skb, READ_ONCE(priv->tx_packet_min))) {
cpsw_err(priv, tx_err, "packet pad failed\n");
ndev->stats.tx_dropped++;
return NET_XMIT_DROP;

@@ -1100,7 +1100,7 @@ static int cpsw_ndo_xdp_xmit(struct net_device *ndev, int n,

for (i = 0; i < n; i++) {
xdpf = frames[i];
if (xdpf->len < CPSW_MIN_PACKET_SIZE)
if (xdpf->len < READ_ONCE(priv->tx_packet_min))
break;

if (cpsw_xdp_tx_frame(priv, xdpf, NULL, priv->emac_port))

@@ -1389,6 +1389,7 @@ static int cpsw_create_ports(struct cpsw_common *cpsw)
priv->dev = dev;
priv->msg_enable = netif_msg_init(debug_level, CPSW_DEBUG);
priv->emac_port = i + 1;
priv->tx_packet_min = CPSW_MIN_PACKET_SIZE;

if (is_valid_ether_addr(slave_data->mac_addr)) {
ether_addr_copy(priv->mac_addr, slave_data->mac_addr);

@@ -1686,6 +1687,7 @@ static int cpsw_dl_switch_mode_set(struct devlink *dl, u32 id,

priv = netdev_priv(sl_ndev);
slave->port_vlan = vlan;
WRITE_ONCE(priv->tx_packet_min, CPSW_MIN_PACKET_SIZE_VLAN);
if (netif_running(sl_ndev))
cpsw_port_add_switch_def_ale_entries(priv,
slave);

@@ -1714,6 +1716,7 @@ static int cpsw_dl_switch_mode_set(struct devlink *dl, u32 id,

priv = netdev_priv(slave->ndev);
slave->port_vlan = slave->data->dual_emac_res_vlan;
WRITE_ONCE(priv->tx_packet_min, CPSW_MIN_PACKET_SIZE);
cpsw_port_add_dual_emac_def_ale_entries(priv, slave);
}

@@ -89,7 +89,8 @@ do { \

#define CPSW_POLL_WEIGHT 64
#define CPSW_RX_VLAN_ENCAP_HDR_SIZE 4
#define CPSW_MIN_PACKET_SIZE (VLAN_ETH_ZLEN)
#define CPSW_MIN_PACKET_SIZE_VLAN (VLAN_ETH_ZLEN)
#define CPSW_MIN_PACKET_SIZE (ETH_ZLEN)
#define CPSW_MAX_PACKET_SIZE (VLAN_ETH_FRAME_LEN +\
ETH_FCS_LEN +\
CPSW_RX_VLAN_ENCAP_HDR_SIZE)

@@ -380,6 +381,7 @@ struct cpsw_priv {
u32 emac_port;
struct cpsw_common *cpsw;
int offload_fwd_mark;
u32 tx_packet_min;
};

#define ndev_to_cpsw(ndev) (((struct cpsw_priv *)netdev_priv(ndev))->cpsw)

@@ -418,7 +418,7 @@ static int hwsim_new_edge_nl(struct sk_buff *msg, struct genl_info *info)
struct hwsim_edge *e;
u32 v0, v1;

if (!info->attrs[MAC802154_HWSIM_ATTR_RADIO_ID] &&
if (!info->attrs[MAC802154_HWSIM_ATTR_RADIO_ID] ||
!info->attrs[MAC802154_HWSIM_ATTR_RADIO_EDGE])
return -EINVAL;

@@ -528,14 +528,14 @@ static int hwsim_set_edge_lqi(struct sk_buff *msg, struct genl_info *info)
u32 v0, v1;
u8 lqi;

if (!info->attrs[MAC802154_HWSIM_ATTR_RADIO_ID] &&
if (!info->attrs[MAC802154_HWSIM_ATTR_RADIO_ID] ||
!info->attrs[MAC802154_HWSIM_ATTR_RADIO_EDGE])
return -EINVAL;

if (nla_parse_nested_deprecated(edge_attrs, MAC802154_HWSIM_EDGE_ATTR_MAX, info->attrs[MAC802154_HWSIM_ATTR_RADIO_EDGE], hwsim_edge_policy, NULL))
return -EINVAL;

if (!edge_attrs[MAC802154_HWSIM_EDGE_ATTR_ENDPOINT_ID] &&
if (!edge_attrs[MAC802154_HWSIM_EDGE_ATTR_ENDPOINT_ID] ||
!edge_attrs[MAC802154_HWSIM_EDGE_ATTR_LQI])
return -EINVAL;

@@ -1406,8 +1406,6 @@ static struct phy_driver ksphy_driver[] = {
.name = "Micrel KSZ87XX Switch",
/* PHY_BASIC_FEATURES */
.config_init = kszphy_config_init,
.config_aneg = ksz8873mll_config_aneg,
.read_status = ksz8873mll_read_status,
.match_phy_device = ksz8795_match_phy_device,
.suspend = genphy_suspend,
.resume = genphy_resume,

@@ -1317,7 +1317,7 @@ static int ppp_nl_newlink(struct net *src_net, struct net_device *dev,
* the PPP unit identifer as suffix (i.e. ppp<unit_id>). This allows
* userspace to infer the device name using to the PPPIOCGUNIT ioctl.
*/
if (!tb[IFLA_IFNAME])
if (!tb[IFLA_IFNAME] || !nla_len(tb[IFLA_IFNAME]) || !*(char *)nla_data(tb[IFLA_IFNAME]))
conf.ifname_is_set = false;

err = ppp_dev_configure(src_net, dev, &conf);
@@ -41,14 +41,14 @@ struct mhi_wwan_dev {
/* Increment RX budget and schedule RX refill if necessary */
static void mhi_wwan_rx_budget_inc(struct mhi_wwan_dev *mhiwwan)
{
spin_lock(&mhiwwan->rx_lock);
spin_lock_bh(&mhiwwan->rx_lock);

mhiwwan->rx_budget++;

if (test_bit(MHI_WWAN_RX_REFILL, &mhiwwan->flags))
schedule_work(&mhiwwan->rx_refill);

spin_unlock(&mhiwwan->rx_lock);
spin_unlock_bh(&mhiwwan->rx_lock);
}

/* Decrement RX budget if non-zero and return true on success */

@@ -56,7 +56,7 @@ static bool mhi_wwan_rx_budget_dec(struct mhi_wwan_dev *mhiwwan)
{
bool ret = false;

spin_lock(&mhiwwan->rx_lock);
spin_lock_bh(&mhiwwan->rx_lock);

if (mhiwwan->rx_budget) {
mhiwwan->rx_budget--;

@@ -64,7 +64,7 @@ static bool mhi_wwan_rx_budget_dec(struct mhi_wwan_dev *mhiwwan)
ret = true;
}

spin_unlock(&mhiwwan->rx_lock);
spin_unlock_bh(&mhiwwan->rx_lock);

return ret;
}

@@ -130,9 +130,9 @@ static void mhi_wwan_ctrl_stop(struct wwan_port *port)
{
struct mhi_wwan_dev *mhiwwan = wwan_port_get_drvdata(port);

spin_lock(&mhiwwan->rx_lock);
spin_lock_bh(&mhiwwan->rx_lock);
clear_bit(MHI_WWAN_RX_REFILL, &mhiwwan->flags);
spin_unlock(&mhiwwan->rx_lock);
spin_unlock_bh(&mhiwwan->rx_lock);

cancel_work_sync(&mhiwwan->rx_refill);

@@ -2527,7 +2527,7 @@ static void deactivate_labels(void *region)

static int init_active_labels(struct nd_region *nd_region)
{
int i;
int i, rc = 0;

for (i = 0; i < nd_region->ndr_mappings; i++) {
struct nd_mapping *nd_mapping = &nd_region->mapping[i];

@@ -2546,13 +2546,14 @@ static int init_active_labels(struct nd_region *nd_region)
else if (test_bit(NDD_LABELING, &nvdimm->flags))
/* fail, labels needed to disambiguate dpa */;
else
return 0;
continue;

dev_err(&nd_region->dev, "%s: is %s, failing probe\n",
dev_name(&nd_mapping->nvdimm->dev),
test_bit(NDD_LOCKED, &nvdimm->flags)
? "locked" : "disabled");
return -ENXIO;
rc = -ENXIO;
goto out;
}
nd_mapping->ndd = ndd;
atomic_inc(&nvdimm->busy);

@@ -2586,13 +2587,17 @@ static int init_active_labels(struct nd_region *nd_region)
break;
}

if (i < nd_region->ndr_mappings) {
if (i < nd_region->ndr_mappings)
rc = -ENOMEM;

out:
if (rc) {
deactivate_labels(nd_region);
return -ENOMEM;
return rc;
}

return devm_add_action_or_reset(&nd_region->dev, deactivate_labels,
nd_region);
nd_region);
}

int nd_region_register_namespaces(struct nd_region *nd_region, int *err)

@@ -143,24 +143,25 @@ static inline __attribute_const__ u32 msi_mask(unsigned x)
* reliably as devices without an INTx disable bit will then generate a
* level IRQ which will never be cleared.
*/
u32 __pci_msi_desc_mask_irq(struct msi_desc *desc, u32 mask, u32 flag)
void __pci_msi_desc_mask_irq(struct msi_desc *desc, u32 mask, u32 flag)
{
u32 mask_bits = desc->masked;
raw_spinlock_t *lock = &desc->dev->msi_lock;
unsigned long flags;

if (pci_msi_ignore_mask || !desc->msi_attrib.maskbit)
return 0;
return;

mask_bits &= ~mask;
mask_bits |= flag;
raw_spin_lock_irqsave(lock, flags);
desc->masked &= ~mask;
desc->masked |= flag;
pci_write_config_dword(msi_desc_to_pci_dev(desc), desc->mask_pos,
mask_bits);

return mask_bits;
desc->masked);
raw_spin_unlock_irqrestore(lock, flags);
}

static void msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag)
{
desc->masked = __pci_msi_desc_mask_irq(desc, mask, flag);
__pci_msi_desc_mask_irq(desc, mask, flag);
}

static void __iomem *pci_msix_desc_addr(struct msi_desc *desc)

@@ -289,13 +290,31 @@ void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
/* Don't touch the hardware now */
} else if (entry->msi_attrib.is_msix) {
void __iomem *base = pci_msix_desc_addr(entry);
bool unmasked = !(entry->masked & PCI_MSIX_ENTRY_CTRL_MASKBIT);

if (!base)
goto skip;

/*
* The specification mandates that the entry is masked
* when the message is modified:
*
* "If software changes the Address or Data value of an
* entry while the entry is unmasked, the result is
* undefined."
*/
if (unmasked)
__pci_msix_desc_mask_irq(entry, PCI_MSIX_ENTRY_CTRL_MASKBIT);

writel(msg->address_lo, base + PCI_MSIX_ENTRY_LOWER_ADDR);
writel(msg->address_hi, base + PCI_MSIX_ENTRY_UPPER_ADDR);
writel(msg->data, base + PCI_MSIX_ENTRY_DATA);

if (unmasked)
__pci_msix_desc_mask_irq(entry, 0);

/* Ensure that the writes are visible in the device */
readl(base + PCI_MSIX_ENTRY_DATA);
} else {
int pos = dev->msi_cap;
u16 msgctl;

@@ -316,6 +335,8 @@ void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
pci_write_config_word(dev, pos + PCI_MSI_DATA_32,
msg->data);
}
/* Ensure that the writes are visible in the device */
pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &msgctl);
}

skip:

@@ -636,21 +657,21 @@ static int msi_capability_init(struct pci_dev *dev, int nvec,
/* Configure MSI capability structure */
ret = pci_msi_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSI);
if (ret) {
msi_mask_irq(entry, mask, ~mask);
msi_mask_irq(entry, mask, 0);
free_msi_irqs(dev);
return ret;
}

ret = msi_verify_entries(dev);
if (ret) {
msi_mask_irq(entry, mask, ~mask);
msi_mask_irq(entry, mask, 0);
free_msi_irqs(dev);
return ret;
}

ret = populate_msi_sysfs(dev);
if (ret) {
msi_mask_irq(entry, mask, ~mask);
msi_mask_irq(entry, mask, 0);
free_msi_irqs(dev);
return ret;
}

@@ -691,6 +712,7 @@ static int msix_setup_entries(struct pci_dev *dev, void __iomem *base,
{
struct irq_affinity_desc *curmsk, *masks = NULL;
struct msi_desc *entry;
void __iomem *addr;
int ret, i;
int vec_count = pci_msix_vec_count(dev);

@@ -711,6 +733,7 @@ static int msix_setup_entries(struct pci_dev *dev, void __iomem *base,

entry->msi_attrib.is_msix = 1;
entry->msi_attrib.is_64 = 1;

if (entries)
entry->msi_attrib.entry_nr = entries[i].entry;
else

@@ -722,6 +745,10 @@ static int msix_setup_entries(struct pci_dev *dev, void __iomem *base,
entry->msi_attrib.default_irq = dev->irq;
entry->mask_base = base;

addr = pci_msix_desc_addr(entry);
if (addr)
entry->masked = readl(addr + PCI_MSIX_ENTRY_VECTOR_CTRL);

list_add_tail(&entry->list, dev_to_msi_list(&dev->dev));
if (masks)
curmsk++;

@@ -732,28 +759,27 @@ out:
return ret;
}

static void msix_program_entries(struct pci_dev *dev,
struct msix_entry *entries)
static void msix_update_entries(struct pci_dev *dev, struct msix_entry *entries)
{
struct msi_desc *entry;
int i = 0;
void __iomem *desc_addr;

for_each_pci_msi_entry(entry, dev) {
if (entries)
entries[i++].vector = entry->irq;

desc_addr = pci_msix_desc_addr(entry);
if (desc_addr)
entry->masked = readl(desc_addr +
PCI_MSIX_ENTRY_VECTOR_CTRL);
else
entry->masked = 0;

msix_mask_irq(entry, 1);
if (entries) {
entries->vector = entry->irq;
entries++;
}
}
}

static void msix_mask_all(void __iomem *base, int tsize)
{
u32 ctrl = PCI_MSIX_ENTRY_CTRL_MASKBIT;
int i;

for (i = 0; i < tsize; i++, base += PCI_MSIX_ENTRY_SIZE)
writel(ctrl, base + PCI_MSIX_ENTRY_VECTOR_CTRL);
}

/**
* msix_capability_init - configure device's MSI-X capability
* @dev: pointer to the pci_dev data structure of MSI-X device function

@@ -768,22 +794,33 @@ static void msix_program_entries(struct pci_dev *dev,
static int msix_capability_init(struct pci_dev *dev, struct msix_entry *entries,
int nvec, struct irq_affinity *affd)
{
int ret;
u16 control;
void __iomem *base;
int ret, tsize;
u16 control;

/* Ensure MSI-X is disabled while it is set up */
pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0);
/*
* Some devices require MSI-X to be enabled before the MSI-X
* registers can be accessed. Mask all the vectors to prevent
* interrupts coming in before they're fully set up.
*/
pci_msix_clear_and_set_ctrl(dev, 0, PCI_MSIX_FLAGS_MASKALL |
PCI_MSIX_FLAGS_ENABLE);

pci_read_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, &control);
/* Request & Map MSI-X table region */
base = msix_map_region(dev, msix_table_size(control));
if (!base)
return -ENOMEM;
tsize = msix_table_size(control);
base = msix_map_region(dev, tsize);
if (!base) {
ret = -ENOMEM;
goto out_disable;
}

/* Ensure that all table entries are masked. */
msix_mask_all(base, tsize);

ret = msix_setup_entries(dev, base, entries, nvec, affd);
if (ret)
return ret;
goto out_disable;

ret = pci_msi_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSIX);
if (ret)

@@ -794,15 +831,7 @@ static int msix_capability_init(struct pci_dev *dev, struct msix_entry *entries,
if (ret)
goto out_free;

/*
* Some devices require MSI-X to be enabled before we can touch the
* MSI-X registers. We need to mask all the vectors to prevent
* interrupts coming in before they're fully set up.
*/
pci_msix_clear_and_set_ctrl(dev, 0,
PCI_MSIX_FLAGS_MASKALL | PCI_MSIX_FLAGS_ENABLE);

msix_program_entries(dev, entries);
msix_update_entries(dev, entries);

ret = populate_msi_sysfs(dev);
if (ret)

@@ -836,6 +865,9 @@ out_avail:
out_free:
free_msi_irqs(dev);

out_disable:
pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0);

return ret;
}

@@ -930,8 +962,7 @@ static void pci_msi_shutdown(struct pci_dev *dev)

/* Return the device with MSI unmasked as initial states */
mask = msi_mask(desc->msi_attrib.multi_cap);
/* Keep cached state to be restored */
__pci_msi_desc_mask_irq(desc, mask, ~mask);
msi_mask_irq(desc, mask, 0);

/* Restore dev->irq to its default pin-assertion IRQ */
dev->irq = desc->msi_attrib.default_irq;

@@ -1016,10 +1047,8 @@ static void pci_msix_shutdown(struct pci_dev *dev)
}

/* Return the device with MSI-X masked as initial states */
for_each_pci_msi_entry(entry, dev) {
/* Keep cached states to be restored */
for_each_pci_msi_entry(entry, dev)
__pci_msix_desc_mask_irq(entry, 1);
}

pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0);
pci_intx_for_msi(dev, 1);
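The msi.c hunks above enforce the PCI rule quoted in the new comment: an MSI-X entry's address or data may only be changed while the entry is masked, and the cached mask state is used to restore the previous setting afterwards. The following userspace model illustrates that mask, modify, unmask sequence; the struct layout mirrors the spec's table entry, but the code is an illustrative sketch, not the kernel implementation.

#include <stdint.h>
#include <stdio.h>

/* Simplified model of one MSI-X table entry; field order follows the
 * spec's table layout, but this is a host-memory stand-in for MMIO. */
struct msix_entry_regs {
	uint32_t addr_lo;
	uint32_t addr_hi;
	uint32_t data;
	uint32_t vector_ctrl;	/* bit 0 = per-vector mask */
};

#define MSIX_CTRL_MASKBIT 0x1u

/* Update an entry's message with the mask/modify/unmask dance the
 * patch adds: the spec leaves the result undefined if the address or
 * data of an unmasked entry is changed. */
static void msix_write_msg(volatile struct msix_entry_regs *e,
			   uint32_t lo, uint32_t hi, uint32_t data)
{
	int was_unmasked = !(e->vector_ctrl & MSIX_CTRL_MASKBIT);

	if (was_unmasked)
		e->vector_ctrl |= MSIX_CTRL_MASKBIT;	/* mask first */

	e->addr_lo = lo;
	e->addr_hi = hi;
	e->data = data;

	if (was_unmasked)
		e->vector_ctrl &= ~MSIX_CTRL_MASKBIT;	/* restore state */
}

int main(void)
{
	struct msix_entry_regs e = { 0, 0, 0, 0 };

	msix_write_msg(&e, 0xfee00000, 0, 0x4041);
	printf("data=0x%x ctrl=0x%x\n", e.data, e.vector_ctrl);
	return 0;
}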
@@ -701,32 +701,32 @@ static const struct pinctrl_pin_desc tglh_pins[] = {

static const struct intel_padgroup tglh_community0_gpps[] = {
TGL_GPP(0, 0, 24, 0), /* GPP_A */
TGL_GPP(1, 25, 44, 128), /* GPP_R */
TGL_GPP(2, 45, 70, 32), /* GPP_B */
TGL_GPP(3, 71, 78, INTEL_GPIO_BASE_NOMAP), /* vGPIO_0 */
TGL_GPP(1, 25, 44, 32), /* GPP_R */
TGL_GPP(2, 45, 70, 64), /* GPP_B */
TGL_GPP(3, 71, 78, 96), /* vGPIO_0 */
};

static const struct intel_padgroup tglh_community1_gpps[] = {
TGL_GPP(0, 79, 104, 96), /* GPP_D */
TGL_GPP(1, 105, 128, 64), /* GPP_C */
TGL_GPP(2, 129, 136, 160), /* GPP_S */
TGL_GPP(3, 137, 153, 192), /* GPP_G */
TGL_GPP(4, 154, 180, 224), /* vGPIO */
TGL_GPP(0, 79, 104, 128), /* GPP_D */
TGL_GPP(1, 105, 128, 160), /* GPP_C */
TGL_GPP(2, 129, 136, 192), /* GPP_S */
TGL_GPP(3, 137, 153, 224), /* GPP_G */
TGL_GPP(4, 154, 180, 256), /* vGPIO */
};

static const struct intel_padgroup tglh_community3_gpps[] = {
TGL_GPP(0, 181, 193, 256), /* GPP_E */
TGL_GPP(1, 194, 217, 288), /* GPP_F */
TGL_GPP(0, 181, 193, 288), /* GPP_E */
TGL_GPP(1, 194, 217, 320), /* GPP_F */
};

static const struct intel_padgroup tglh_community4_gpps[] = {
TGL_GPP(0, 218, 241, 320), /* GPP_H */
TGL_GPP(0, 218, 241, 352), /* GPP_H */
TGL_GPP(1, 242, 251, 384), /* GPP_J */
TGL_GPP(2, 252, 266, 352), /* GPP_K */
TGL_GPP(2, 252, 266, 416), /* GPP_K */
};

static const struct intel_padgroup tglh_community5_gpps[] = {
TGL_GPP(0, 267, 281, 416), /* GPP_I */
TGL_GPP(0, 267, 281, 448), /* GPP_I */
TGL_GPP(1, 282, 290, INTEL_GPIO_BASE_NOMAP), /* JTAG */
};

@@ -925,12 +925,10 @@ int mtk_pinconf_adv_pull_set(struct mtk_pinctrl *hw,
err = hw->soc->bias_set(hw, desc, pullup);
if (err)
return err;
} else if (hw->soc->bias_set_combo) {
err = hw->soc->bias_set_combo(hw, desc, pullup, arg);
if (err)
return err;
} else {
return -ENOTSUPP;
err = mtk_pinconf_bias_set_rev1(hw, desc, pullup);
if (err)
err = mtk_pinconf_bias_set(hw, desc, pullup);
}
}
Some files were not shown because too many files have changed in this diff.