Mirror of git://git.yoctoproject.org/linux-yocto.git (synced 2025-07-05 13:25:20 +02:00)
Merge tag 'v6.6.89' into v6.6/standard/base

This is the 6.6.89 stable release

# -----BEGIN PGP SIGNATURE-----
#
# iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAmgUXsEACgkQONu9yGCS
# aT6/cA//Q0kwvXRkxVZzT1EO5jgC5hd84aheArdZ9zAz28EQ9tGL3Afchx7GpbGs
# DkmXAYKlRuBQJlO74ihDzpbgjSKMLKuPiH2FJF+8P5I3PPc0FLxQSPI2bDK7/oOE
# 5KnSXSW2sY8r63f6ZjeBFt3ubBa+SuNHTG1VAmSr3zGzKDttChgkt4cQFw7VAFH+
# zgyQ/f9JVy6SZW+J7dHki48fuPbSlC+1xEocD1pBDGLG2IMUmSziCood9/E+wPPD
# i9CbfeBIVm0VHcb7C8KVj+hNuuQ7XF4Zq3oLhKME1boXf0E4sHDoLRRh25RPJg1j
# mZyOyyWyc2BfwXD/4ZyLgKIzgUMWQKDXh+7bQIqEgRAr2L7/siUUSSP4b5puA5TL
# BguHsPuBnAbqzAPHgXxg5X938gvvaiok0Z36QorfKx2/9hiygI6rZTGsoQ+7u8+H
# 8hpx8b58X1sysdi2yKFCfGAp1XxXy3FA/1EAJwNNFoy/FGB4Ucp5ezunuo8EkQjA
# gB3H2iywB8sSpMzbx2cDzZXH4ANvo1M8vMn/FnSpgkc1wa3P+pO4/ptShI4NU1xn
# tnj29ycd/44+FHqO/8CaRCjAyv+VS/nTuauIOo89qQuV3D4Ht8O9bzA9Mbo0Z+fg
# LuUFexDkq10mtpHiCqw5SmCmZJAdiO2r1hMVTolVlEUwYUB4YpM=
# =a9Qr
# -----END PGP SIGNATURE-----
# gpg: Signature made Fri 02 May 2025 01:57:21 AM EDT
# gpg:                using RSA key 647F28654894E3BD457199BE38DBBDC86092693E
# gpg: Can't check signature: No public key
This commit is contained in:
commit 26ad3d3b17
@@ -39,14 +39,15 @@ per Hz, leading to::
 
 Two different capacity values are used within the scheduler. A CPU's
-``capacity_orig`` is its maximum attainable capacity, i.e. its maximum
-attainable performance level. A CPU's ``capacity`` is its ``capacity_orig`` to
-which some loss of available performance (e.g. time spent handling IRQs) is
-subtracted.
+``original capacity`` is its maximum attainable capacity, i.e. its maximum
+attainable performance level. This original capacity is returned by
+the function arch_scale_cpu_capacity(). A CPU's ``capacity`` is its ``original
+capacity`` to which some loss of available performance (e.g. time spent
+handling IRQs) is subtracted.
 
 Note that a CPU's ``capacity`` is solely intended to be used by the CFS class,
-while ``capacity_orig`` is class-agnostic. The rest of this document will use
-the term ``capacity`` interchangeably with ``capacity_orig`` for the sake of
+while ``original capacity`` is class-agnostic. The rest of this document will use
+the term ``capacity`` interchangeably with ``original capacity`` for the sake of
 brevity.
 
 1.3 Platform examples
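Note: the documentation hunk above defines ``capacity`` as the CPU's original
(maximum) capacity, as returned by arch_scale_cpu_capacity(), minus whatever
performance is lost to activities such as IRQ handling. A minimal illustrative
sketch of that relationship (not the scheduler's actual implementation; the
irq_loss parameter is a hypothetical stand-in for the subtracted loss):

        /* capacity = original capacity - lost performance, clamped at zero */
        unsigned long effective_capacity(int cpu, unsigned long irq_loss)
        {
                unsigned long original = arch_scale_cpu_capacity(cpu);

                return original > irq_loss ? original - irq_loss : 0;
        }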
Makefile | 2 +-

@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 6
 PATCHLEVEL = 6
-SUBLEVEL = 88
+SUBLEVEL = 89
 EXTRAVERSION =
 NAME = Pinguïn Aangedreven
 
@@ -196,13 +196,6 @@
                         wakeup-event-action = <EV_ACT_ASSERTED>;
                         wakeup-source;
                 };
-
-                key-suspend {
-                        label = "Suspend";
-                        gpios = <&gpio TEGRA234_MAIN_GPIO(G, 2) GPIO_ACTIVE_LOW>;
-                        linux,input-type = <EV_KEY>;
-                        linux,code = <KEY_SLEEP>;
-                };
         };
 
         fan: pwm-fan {
@@ -59,6 +59,7 @@ config LOONGARCH
         select ARCH_SUPPORTS_NUMA_BALANCING
         select ARCH_USE_BUILTIN_BSWAP
         select ARCH_USE_CMPXCHG_LOCKREF
+        select ARCH_USE_MEMTEST
         select ARCH_USE_QUEUED_RWLOCKS
         select ARCH_USE_QUEUED_SPINLOCKS
         select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT
@@ -33,9 +33,9 @@ struct pt_regs {
         unsigned long __last[];
 } __aligned(8);
 
-static inline int regs_irqs_disabled(struct pt_regs *regs)
+static __always_inline bool regs_irqs_disabled(struct pt_regs *regs)
 {
-        return arch_irqs_disabled_flags(regs->csr_prmd);
+        return !(regs->csr_prmd & CSR_PRMD_PIE);
 }
 
 static inline unsigned long kernel_stack_pointer(struct pt_regs *regs)
@@ -527,9 +527,10 @@ asmlinkage void noinstr do_ale(struct pt_regs *regs)
         die_if_kernel("Kernel ale access", regs);
         force_sig_fault(SIGBUS, BUS_ADRALN, (void __user *)regs->csr_badvaddr);
 #else
+        bool pie = regs_irqs_disabled(regs);
         unsigned int *pc;
 
-        if (regs->csr_prmd & CSR_PRMD_PIE)
+        if (!pie)
                 local_irq_enable();
 
         perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, regs->csr_badvaddr);
@@ -556,7 +557,7 @@ sigbus:
         die_if_kernel("Kernel ale access", regs);
         force_sig_fault(SIGBUS, BUS_ADRALN, (void __user *)regs->csr_badvaddr);
 out:
-        if (regs->csr_prmd & CSR_PRMD_PIE)
+        if (!pie)
                 local_irq_disable();
 #endif
         irqentry_exit(regs, state);
@@ -588,12 +589,13 @@ static void bug_handler(struct pt_regs *regs)
 asmlinkage void noinstr do_bce(struct pt_regs *regs)
 {
         bool user = user_mode(regs);
+        bool pie = regs_irqs_disabled(regs);
         unsigned long era = exception_era(regs);
         u64 badv = 0, lower = 0, upper = ULONG_MAX;
         union loongarch_instruction insn;
         irqentry_state_t state = irqentry_enter(regs);
 
-        if (regs->csr_prmd & CSR_PRMD_PIE)
+        if (!pie)
                 local_irq_enable();
 
         current->thread.trap_nr = read_csr_excode();
@@ -659,7 +661,7 @@ asmlinkage void noinstr do_bce(struct pt_regs *regs)
         force_sig_bnderr((void __user *)badv, (void __user *)lower, (void __user *)upper);
 
 out:
-        if (regs->csr_prmd & CSR_PRMD_PIE)
+        if (!pie)
                 local_irq_disable();
 
         irqentry_exit(regs, state);
@@ -677,11 +679,12 @@ bad_era:
 asmlinkage void noinstr do_bp(struct pt_regs *regs)
 {
         bool user = user_mode(regs);
+        bool pie = regs_irqs_disabled(regs);
         unsigned int opcode, bcode;
         unsigned long era = exception_era(regs);
         irqentry_state_t state = irqentry_enter(regs);
 
-        if (regs->csr_prmd & CSR_PRMD_PIE)
+        if (!pie)
                 local_irq_enable();
 
         if (__get_inst(&opcode, (u32 *)era, user))
@@ -747,7 +750,7 @@ asmlinkage void noinstr do_bp(struct pt_regs *regs)
         }
 
 out:
-        if (regs->csr_prmd & CSR_PRMD_PIE)
+        if (!pie)
                 local_irq_disable();
 
         irqentry_exit(regs, state);
@@ -982,6 +985,7 @@ static void init_restore_lbt(void)
 
 asmlinkage void noinstr do_lbt(struct pt_regs *regs)
 {
+        bool pie = regs_irqs_disabled(regs);
         irqentry_state_t state = irqentry_enter(regs);
 
         /*
@@ -991,7 +995,7 @@ asmlinkage void noinstr do_lbt(struct pt_regs *regs)
          * (including the user using 'MOVGR2GCSR' to turn on TM, which
          * will not trigger the BTE), we need to check PRMD first.
          */
-        if (regs->csr_prmd & CSR_PRMD_PIE)
+        if (!pie)
                 local_irq_enable();
 
         if (!cpu_has_lbt) {
@@ -1005,7 +1009,7 @@ asmlinkage void noinstr do_lbt(struct pt_regs *regs)
         preempt_enable();
 
 out:
-        if (regs->csr_prmd & CSR_PRMD_PIE)
+        if (!pie)
                 local_irq_disable();
 
         irqentry_exit(regs, state);
@@ -47,7 +47,7 @@ pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr,
                         pmd = pmd_offset(pud, addr);
                 }
         }
-        return (pte_t *) pmd;
+        return pmd_none(pmdp_get(pmd)) ? NULL : (pte_t *) pmd;
 }
 
 int pmd_huge(pmd_t pmd)
@@ -64,9 +64,6 @@ void __init paging_init(void)
 {
         unsigned long max_zone_pfns[MAX_NR_ZONES];
 
-#ifdef CONFIG_ZONE_DMA
-        max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN;
-#endif
 #ifdef CONFIG_ZONE_DMA32
         max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN;
 #endif
@@ -47,6 +47,16 @@ extern phys_addr_t __mips_cm_phys_base(void);
  */
 extern int mips_cm_is64;
 
+/*
+ * mips_cm_is_l2_hci_broken - determine if HCI is broken
+ *
+ * Some CM reports show that Hardware Cache Initialization is
+ * complete, but in reality it's not the case. They also incorrectly
+ * indicate that Hardware Cache Initialization is supported. This
+ * flags allows warning about this broken feature.
+ */
+extern bool mips_cm_is_l2_hci_broken;
+
 /**
  * mips_cm_error_report - Report CM cache errors
  */
@@ -85,6 +95,18 @@ static inline bool mips_cm_present(void)
 #endif
 }
 
+/**
+ * mips_cm_update_property - update property from the device tree
+ *
+ * Retrieve the properties from the device tree if a CM node exist and
+ * update the internal variable based on this.
+ */
+#ifdef CONFIG_MIPS_CM
+extern void mips_cm_update_property(void);
+#else
+static inline void mips_cm_update_property(void) {}
+#endif
+
 /**
  * mips_cm_has_l2sync - determine whether an L2-only sync region is present
  *
@@ -5,6 +5,7 @@
  */
 
 #include <linux/errno.h>
+#include <linux/of.h>
 #include <linux/percpu.h>
 #include <linux/spinlock.h>
 
@@ -14,6 +15,7 @@
 void __iomem *mips_gcr_base;
 void __iomem *mips_cm_l2sync_base;
 int mips_cm_is64;
+bool mips_cm_is_l2_hci_broken;
 
 static char *cm2_tr[8] = {
         "mem",  "gcr",  "gic",  "mmio",
@@ -243,6 +245,18 @@ static void mips_cm_probe_l2sync(void)
         mips_cm_l2sync_base = ioremap(addr, MIPS_CM_L2SYNC_SIZE);
 }
 
+void mips_cm_update_property(void)
+{
+        struct device_node *cm_node;
+
+        cm_node = of_find_compatible_node(of_root, NULL, "mobileye,eyeq6-cm");
+        if (!cm_node)
+                return;
+        pr_info("HCI (Hardware Cache Init for the L2 cache) in GCR_L2_RAM_CONFIG from the CM3 is broken");
+        mips_cm_is_l2_hci_broken = true;
+        of_node_put(cm_node);
+}
+
 int mips_cm_probe(void)
 {
         phys_addr_t addr;
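Note: the MIPS hunks above export mips_cm_is_l2_hci_broken and add
mips_cm_update_property(), which latches the quirk when a "mobileye,eyeq6-cm"
node is present in the device tree. A hedged sketch of how a caller might
consume the flag (the function below is hypothetical, not part of this diff):

        static void example_l2_setup(void)
        {
                /* parse the DT and set mips_cm_is_l2_hci_broken if it matches */
                mips_cm_update_property();

                /* do not trust the HCI completion status on affected parts */
                if (mips_cm_is_l2_hci_broken)
                        pr_warn("HCI unreliable; initializing L2 cache manually\n");
        }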
@@ -63,6 +63,7 @@ static unsigned long pdt_entry[MAX_PDT_ENTRIES] __page_aligned_bss;
 #define PDT_ADDR_PERM_ERR       (pdt_type != PDT_PDC ? 2UL : 0UL)
 #define PDT_ADDR_SINGLE_ERR     1UL
 
+#ifdef CONFIG_PROC_FS
 /* report PDT entries via /proc/meminfo */
 void arch_report_meminfo(struct seq_file *m)
 {
@@ -74,6 +75,7 @@ void arch_report_meminfo(struct seq_file *m)
         seq_printf(m, "PDT_cur_entries: %7lu\n",
                         pdt_status.pdt_entries);
 }
+#endif
 
 static int get_info_pat_new(void)
 {
@@ -115,24 +115,19 @@
         \old_c
 .endm
 
-#define _ALTERNATIVE_CFG(old_c, ...)    \
-        ALTERNATIVE_CFG old_c
-
-#define _ALTERNATIVE_CFG_2(old_c, ...)  \
-        ALTERNATIVE_CFG old_c
+#define __ALTERNATIVE_CFG(old_c, ...)           ALTERNATIVE_CFG old_c
+#define __ALTERNATIVE_CFG_2(old_c, ...)         ALTERNATIVE_CFG old_c
 
 #else /* !__ASSEMBLY__ */
 
-#define __ALTERNATIVE_CFG(old_c)        \
-        old_c "\n"
-
-#define _ALTERNATIVE_CFG(old_c, ...)    \
-        __ALTERNATIVE_CFG(old_c)
-
-#define _ALTERNATIVE_CFG_2(old_c, ...)  \
-        __ALTERNATIVE_CFG(old_c)
+#define __ALTERNATIVE_CFG(old_c, ...)           old_c "\n"
+#define __ALTERNATIVE_CFG_2(old_c, ...)         old_c "\n"
 
 #endif /* __ASSEMBLY__ */
 
+#define _ALTERNATIVE_CFG(old_c, ...)            __ALTERNATIVE_CFG(old_c)
+#define _ALTERNATIVE_CFG_2(old_c, ...)          __ALTERNATIVE_CFG_2(old_c)
+
 #endif /* CONFIG_RISCV_ALTERNATIVE */
 
 /*
@@ -94,7 +94,7 @@ static int handle_validity(struct kvm_vcpu *vcpu)
 
         vcpu->stat.exit_validity++;
         trace_kvm_s390_intercept_validity(vcpu, viwhy);
-        KVM_EVENT(3, "validity intercept 0x%x for pid %u (kvm 0x%pK)", viwhy,
+        KVM_EVENT(3, "validity intercept 0x%x for pid %u (kvm 0x%p)", viwhy,
                   current->pid, vcpu->kvm);
 
         /* do not warn on invalid runtime instrumentation mode */
@@ -3161,7 +3161,7 @@ void kvm_s390_gisa_clear(struct kvm *kvm)
         if (!gi->origin)
                 return;
         gisa_clear_ipm(gi->origin);
-        VM_EVENT(kvm, 3, "gisa 0x%pK cleared", gi->origin);
+        VM_EVENT(kvm, 3, "gisa 0x%p cleared", gi->origin);
 }
 
 void kvm_s390_gisa_init(struct kvm *kvm)
@@ -3178,7 +3178,7 @@ void kvm_s390_gisa_init(struct kvm *kvm)
         gi->timer.function = gisa_vcpu_kicker;
         memset(gi->origin, 0, sizeof(struct kvm_s390_gisa));
         gi->origin->next_alert = (u32)virt_to_phys(gi->origin);
-        VM_EVENT(kvm, 3, "gisa 0x%pK initialized", gi->origin);
+        VM_EVENT(kvm, 3, "gisa 0x%p initialized", gi->origin);
 }
 
 void kvm_s390_gisa_enable(struct kvm *kvm)
@@ -3219,7 +3219,7 @@ void kvm_s390_gisa_destroy(struct kvm *kvm)
                 process_gib_alert_list();
         hrtimer_cancel(&gi->timer);
         gi->origin = NULL;
-        VM_EVENT(kvm, 3, "gisa 0x%pK destroyed", gisa);
+        VM_EVENT(kvm, 3, "gisa 0x%p destroyed", gisa);
 }
 
 void kvm_s390_gisa_disable(struct kvm *kvm)
@@ -3468,7 +3468,7 @@ int __init kvm_s390_gib_init(u8 nisc)
                 }
         }
 
-        KVM_EVENT(3, "gib 0x%pK (nisc=%d) initialized", gib, gib->nisc);
+        KVM_EVENT(3, "gib 0x%p (nisc=%d) initialized", gib, gib->nisc);
         goto out;
 
 out_unreg_gal:
@@ -990,7 +990,7 @@ static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *att
                 }
                 mutex_unlock(&kvm->lock);
                 VM_EVENT(kvm, 3, "SET: max guest address: %lu", new_limit);
-                VM_EVENT(kvm, 3, "New guest asce: 0x%pK",
+                VM_EVENT(kvm, 3, "New guest asce: 0x%p",
                          (void *) kvm->arch.gmap->asce);
                 break;
         }
@@ -3418,7 +3418,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
         kvm_s390_gisa_init(kvm);
         INIT_LIST_HEAD(&kvm->arch.pv.need_cleanup);
         kvm->arch.pv.set_aside = NULL;
-        KVM_EVENT(3, "vm 0x%pK created by pid %u", kvm, current->pid);
+        KVM_EVENT(3, "vm 0x%p created by pid %u", kvm, current->pid);
 
         return 0;
 out_err:
@@ -3481,7 +3481,7 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
         kvm_s390_destroy_adapters(kvm);
         kvm_s390_clear_float_irqs(kvm);
         kvm_s390_vsie_destroy(kvm);
-        KVM_EVENT(3, "vm 0x%pK destroyed", kvm);
+        KVM_EVENT(3, "vm 0x%p destroyed", kvm);
 }
 
 /* Section: vcpu related */
@@ -3602,7 +3602,7 @@ static int sca_switch_to_extended(struct kvm *kvm)
 
         free_page((unsigned long)old_sca);
 
-        VM_EVENT(kvm, 2, "Switched to ESCA (0x%pK -> 0x%pK)",
+        VM_EVENT(kvm, 2, "Switched to ESCA (0x%p -> 0x%p)",
                  old_sca, kvm->arch.sca);
         return 0;
 }
@@ -3974,7 +3974,7 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
                 goto out_free_sie_block;
         }
 
-        VM_EVENT(vcpu->kvm, 3, "create cpu %d at 0x%pK, sie block at 0x%pK",
+        VM_EVENT(vcpu->kvm, 3, "create cpu %d at 0x%p, sie block at 0x%p",
                  vcpu->vcpu_id, vcpu, vcpu->arch.sie_block);
         trace_kvm_s390_create_vcpu(vcpu->vcpu_id, vcpu, vcpu->arch.sie_block);
 
@@ -56,7 +56,7 @@ TRACE_EVENT(kvm_s390_create_vcpu,
                     __entry->sie_block = sie_block;
                     ),
 
-            TP_printk("create cpu %d at 0x%pK, sie block at 0x%pK",
+            TP_printk("create cpu %d at 0x%p, sie block at 0x%p",
                       __entry->id, __entry->vcpu, __entry->sie_block)
         );
 
@@ -255,7 +255,7 @@ TRACE_EVENT(kvm_s390_enable_css,
                     __entry->kvm = kvm;
                     ),
 
-            TP_printk("enabling channel I/O support (kvm @ %pK)\n",
+            TP_printk("enabling channel I/O support (kvm @ %p)\n",
                       __entry->kvm)
         );
 
@@ -16,7 +16,7 @@
 
 SYM_FUNC_START(entry_ibpb)
         movl $MSR_IA32_PRED_CMD, %ecx
-        movl $PRED_CMD_IBPB, %eax
+        movl _ASM_RIP(x86_pred_cmd), %eax
         xorl %edx, %edx
         wrmsr
 
@@ -623,7 +623,7 @@ int x86_pmu_hw_config(struct perf_event *event)
         if (event->attr.type == event->pmu->type)
                 event->hw.config |= event->attr.config & X86_RAW_EVENT_MASK;
 
-        if (!event->attr.freq && x86_pmu.limit_period) {
+        if (is_sampling_event(event) && !event->attr.freq && x86_pmu.limit_period) {
                 s64 left = event->attr.sample_period;
                 x86_pmu.limit_period(event, &left);
                 if (left > event->attr.sample_period)
@@ -229,9 +229,6 @@ register unsigned long current_stack_pointer asm(_ASM_SP);
 #define _ASM_EXTABLE_UA(from, to)                               \
         _ASM_EXTABLE_TYPE(from, to, EX_TYPE_UACCESS)
 
-#define _ASM_EXTABLE_CPY(from, to)                              \
-        _ASM_EXTABLE_TYPE(from, to, EX_TYPE_COPY)
-
 #define _ASM_EXTABLE_FAULT(from, to)                            \
         _ASM_EXTABLE_TYPE(from, to, EX_TYPE_FAULT)
 
@@ -36,7 +36,7 @@
 #define EX_TYPE_DEFAULT                 1
 #define EX_TYPE_FAULT                   2
 #define EX_TYPE_UACCESS                 3
-#define EX_TYPE_COPY                    4
+/* unused, was: #define EX_TYPE_COPY 4 */
 #define EX_TYPE_CLEAR_FS                5
 #define EX_TYPE_FPU_RESTORE             6
 #define EX_TYPE_BPF                     7
@@ -159,6 +159,8 @@
 #define INTEL_FAM6_GRANITERAPIDS_D      0xAE
 #define INTEL_GRANITERAPIDS_D           IFM(6, 0xAE)
 
+#define INTEL_BARTLETTLAKE              IFM(6, 0xD7) /* Raptor Cove */
+
 /* "Hybrid" Processors (P-Core/E-Core) */
 
 #define INTEL_FAM6_LAKEFIELD            0x8A /* Sunny Cove / Tremont */
@@ -1574,7 +1574,7 @@ static void __init spec_ctrl_disable_kernel_rrsba(void)
         rrsba_disabled = true;
 }
 
-static void __init spectre_v2_determine_rsb_fill_type_at_vmexit(enum spectre_v2_mitigation mode)
+static void __init spectre_v2_select_rsb_mitigation(enum spectre_v2_mitigation mode)
 {
         /*
          * Similar to context switches, there are two types of RSB attacks
@@ -1598,27 +1598,30 @@ static void __init spectre_v2_determine_rsb_fill_type_at_vmexit(enum spectre_v2_
          */
         switch (mode) {
         case SPECTRE_V2_NONE:
-                return;
+                break;
 
-        case SPECTRE_V2_EIBRS_LFENCE:
         case SPECTRE_V2_EIBRS:
-                if (boot_cpu_has_bug(X86_BUG_EIBRS_PBRSB)) {
-                        setup_force_cpu_cap(X86_FEATURE_RSB_VMEXIT_LITE);
-                        pr_info("Spectre v2 / PBRSB-eIBRS: Retire a single CALL on VMEXIT\n");
-                }
-                return;
-
+        case SPECTRE_V2_EIBRS_LFENCE:
         case SPECTRE_V2_EIBRS_RETPOLINE:
+                if (boot_cpu_has_bug(X86_BUG_EIBRS_PBRSB)) {
+                        pr_info("Spectre v2 / PBRSB-eIBRS: Retire a single CALL on VMEXIT\n");
+                        setup_force_cpu_cap(X86_FEATURE_RSB_VMEXIT_LITE);
+                }
+                break;
+
         case SPECTRE_V2_RETPOLINE:
         case SPECTRE_V2_LFENCE:
         case SPECTRE_V2_IBRS:
+                pr_info("Spectre v2 / SpectreRSB: Filling RSB on context switch and VMEXIT\n");
+                setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
                 setup_force_cpu_cap(X86_FEATURE_RSB_VMEXIT);
-                pr_info("Spectre v2 / SpectreRSB : Filling RSB on VMEXIT\n");
-                return;
-        }
+                break;
 
-        pr_warn_once("Unknown Spectre v2 mode, disabling RSB mitigation at VM exit");
-        dump_stack();
+        default:
+                pr_warn_once("Unknown Spectre v2 mode, disabling RSB mitigation\n");
+                dump_stack();
+                break;
+        }
 }
 
 /*
@@ -1844,10 +1847,7 @@ static void __init spectre_v2_select_mitigation(void)
          *
          * FIXME: Is this pointless for retbleed-affected AMD?
          */
-        setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
-        pr_info("Spectre v2 / SpectreRSB mitigation: Filling RSB on context switch\n");
-
-        spectre_v2_determine_rsb_fill_type_at_vmexit(mode);
+        spectre_v2_select_rsb_mitigation(mode);
 
         /*
          * Retpoline protects the kernel, but doesn't protect firmware. IBRS
@@ -288,14 +288,12 @@ static noinstr int error_context(struct mce *m, struct pt_regs *regs)
         copy_user  = is_copy_from_user(regs);
         instrumentation_end();
 
-        switch (fixup_type) {
-        case EX_TYPE_UACCESS:
-        case EX_TYPE_COPY:
-                if (!copy_user)
-                        return IN_KERNEL;
-                m->kflags |= MCE_IN_KERNEL_COPYIN;
-                fallthrough;
+        if (copy_user) {
+                m->kflags |= MCE_IN_KERNEL_COPYIN | MCE_IN_KERNEL_RECOV;
+                return IN_KERNEL_RECOV;
+        }
 
+        switch (fixup_type) {
         case EX_TYPE_FAULT_MCE_SAFE:
         case EX_TYPE_DEFAULT_MCE_SAFE:
                 m->kflags |= MCE_IN_KERNEL_RECOV;
@@ -46,7 +46,8 @@ bool __init pit_timer_init(void)
                  * VMMs otherwise steal CPU time just to pointlessly waggle
                  * the (masked) IRQ.
                  */
-                clockevent_i8253_disable();
+                scoped_guard(irq)
+                        clockevent_i8253_disable();
                 return false;
         }
         clockevent_i8253_init(true);
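Note: scoped_guard(irq), provided by linux/cleanup.h, disables local interrupts
for the statement (or block) that follows and restores them automatically when
the scope is left. The hunk above is therefore roughly equivalent to this
open-coded form (illustrative only):

        local_irq_disable();
        clockevent_i8253_disable();
        local_irq_enable();

with the guard form having no explicit re-enable path that could be missed on
an early return.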
@@ -820,7 +820,7 @@ static int svm_ir_list_add(struct vcpu_svm *svm, struct amd_iommu_pi_data *pi)
          * Allocating new amd_iommu_pi_data, which will get
          * add to the per-vcpu ir_list.
          */
-        ir = kzalloc(sizeof(struct amd_svm_iommu_ir), GFP_KERNEL_ACCOUNT);
+        ir = kzalloc(sizeof(struct amd_svm_iommu_ir), GFP_ATOMIC | __GFP_ACCOUNT);
         if (!ir) {
                 ret = -ENOMEM;
                 goto out;
@@ -896,6 +896,7 @@ int avic_pi_update_irte(struct kvm *kvm, unsigned int host_irq,
 {
         struct kvm_kernel_irq_routing_entry *e;
         struct kvm_irq_routing_table *irq_rt;
+        bool enable_remapped_mode = true;
         int idx, ret = 0;
 
         if (!kvm_arch_has_assigned_device(kvm) ||
@@ -933,6 +934,8 @@ int avic_pi_update_irte(struct kvm *kvm, unsigned int host_irq,
                     kvm_vcpu_apicv_active(&svm->vcpu)) {
                         struct amd_iommu_pi_data pi;
 
+                        enable_remapped_mode = false;
+
                         /* Try to enable guest_mode in IRTE */
                         pi.base = __sme_set(page_to_phys(svm->avic_backing_page) &
                                             AVIC_HPA_MASK);
@@ -951,33 +954,6 @@ int avic_pi_update_irte(struct kvm *kvm, unsigned int host_irq,
                          */
                         if (!ret && pi.is_guest_mode)
                                 svm_ir_list_add(svm, &pi);
-                } else {
-                        /* Use legacy mode in IRTE */
-                        struct amd_iommu_pi_data pi;
-
-                        /**
-                         * Here, pi is used to:
-                         * - Tell IOMMU to use legacy mode for this interrupt.
-                         * - Retrieve ga_tag of prior interrupt remapping data.
-                         */
-                        pi.prev_ga_tag = 0;
-                        pi.is_guest_mode = false;
-                        ret = irq_set_vcpu_affinity(host_irq, &pi);
-
-                        /**
-                         * Check if the posted interrupt was previously
-                         * setup with the guest_mode by checking if the ga_tag
-                         * was cached. If so, we need to clean up the per-vcpu
-                         * ir_list.
-                         */
-                        if (!ret && pi.prev_ga_tag) {
-                                int id = AVIC_GATAG_TO_VCPUID(pi.prev_ga_tag);
-                                struct kvm_vcpu *vcpu;
-
-                                vcpu = kvm_get_vcpu_by_id(kvm, id);
-                                if (vcpu)
-                                        svm_ir_list_del(to_svm(vcpu), &pi);
-                        }
                 }
 
                 if (!ret && svm) {
@@ -993,6 +969,34 @@ int avic_pi_update_irte(struct kvm *kvm, unsigned int host_irq,
         }
 
         ret = 0;
+        if (enable_remapped_mode) {
+                /* Use legacy mode in IRTE */
+                struct amd_iommu_pi_data pi;
+
+                /**
+                 * Here, pi is used to:
+                 * - Tell IOMMU to use legacy mode for this interrupt.
+                 * - Retrieve ga_tag of prior interrupt remapping data.
+                 */
+                pi.prev_ga_tag = 0;
+                pi.is_guest_mode = false;
+                ret = irq_set_vcpu_affinity(host_irq, &pi);
+
+                /**
+                 * Check if the posted interrupt was previously
+                 * setup with the guest_mode by checking if the ga_tag
+                 * was cached. If so, we need to clean up the per-vcpu
+                 * ir_list.
+                 */
+                if (!ret && pi.prev_ga_tag) {
+                        int id = AVIC_GATAG_TO_VCPUID(pi.prev_ga_tag);
+                        struct kvm_vcpu *vcpu;
+
+                        vcpu = kvm_get_vcpu_by_id(kvm, id);
+                        if (vcpu)
+                                svm_ir_list_del(to_svm(vcpu), &pi);
+                }
+        }
 out:
         srcu_read_unlock(&kvm->irq_srcu, idx);
         return ret;
@@ -274,6 +274,7 @@ int vmx_pi_update_irte(struct kvm *kvm, unsigned int host_irq,
 {
         struct kvm_kernel_irq_routing_entry *e;
         struct kvm_irq_routing_table *irq_rt;
+        bool enable_remapped_mode = true;
         struct kvm_lapic_irq irq;
         struct kvm_vcpu *vcpu;
         struct vcpu_data vcpu_info;
@@ -312,21 +313,8 @@ int vmx_pi_update_irte(struct kvm *kvm, unsigned int host_irq,
 
                 kvm_set_msi_irq(kvm, e, &irq);
                 if (!kvm_intr_is_single_vcpu(kvm, &irq, &vcpu) ||
-                    !kvm_irq_is_postable(&irq)) {
-                        /*
-                         * Make sure the IRTE is in remapped mode if
-                         * we don't handle it in posted mode.
-                         */
-                        ret = irq_set_vcpu_affinity(host_irq, NULL);
-                        if (ret < 0) {
-                                printk(KERN_INFO
-                                   "failed to back to remapped mode, irq: %u\n",
-                                   host_irq);
-                                goto out;
-                        }
-
+                    !kvm_irq_is_postable(&irq))
                         continue;
-                }
 
                 vcpu_info.pi_desc_addr = __pa(vcpu_to_pi_desc(vcpu));
                 vcpu_info.vector = irq.vector;
@@ -334,11 +322,12 @@ int vmx_pi_update_irte(struct kvm *kvm, unsigned int host_irq,
                 trace_kvm_pi_irte_update(host_irq, vcpu->vcpu_id, e->gsi,
                                          vcpu_info.vector, vcpu_info.pi_desc_addr, set);
 
-                if (set)
-                        ret = irq_set_vcpu_affinity(host_irq, &vcpu_info);
-                else
-                        ret = irq_set_vcpu_affinity(host_irq, NULL);
+                if (!set)
+                        continue;
 
+                enable_remapped_mode = false;
+
+                ret = irq_set_vcpu_affinity(host_irq, &vcpu_info);
                 if (ret < 0) {
                         printk(KERN_INFO "%s: failed to update PI IRTE\n",
                                         __func__);
@@ -346,6 +335,9 @@ int vmx_pi_update_irte(struct kvm *kvm, unsigned int host_irq,
                 }
         }
 
+        if (enable_remapped_mode)
+                ret = irq_set_vcpu_affinity(host_irq, NULL);
+
         ret = 0;
 out:
         srcu_read_unlock(&kvm->irq_srcu, idx);
@@ -13297,7 +13297,8 @@ int kvm_arch_update_irqfd_routing(struct kvm *kvm, unsigned int host_irq,
 bool kvm_arch_irqfd_route_changed(struct kvm_kernel_irq_routing_entry *old,
                                   struct kvm_kernel_irq_routing_entry *new)
 {
-        if (new->type != KVM_IRQ_ROUTING_MSI)
+        if (old->type != KVM_IRQ_ROUTING_MSI ||
+            new->type != KVM_IRQ_ROUTING_MSI)
                 return true;
 
         return !!memcmp(&old->msi, &new->msi, sizeof(new->msi));
@@ -163,13 +163,6 @@ static bool ex_handler_uaccess(const struct exception_table_entry *fixup,
         return ex_handler_default(fixup, regs);
 }
 
-static bool ex_handler_copy(const struct exception_table_entry *fixup,
-                            struct pt_regs *regs, int trapnr)
-{
-        WARN_ONCE(trapnr == X86_TRAP_GP, "General protection fault in user access. Non-canonical address?");
-        return ex_handler_fault(fixup, regs, trapnr);
-}
-
 static bool ex_handler_msr(const struct exception_table_entry *fixup,
                            struct pt_regs *regs, bool wrmsr, bool safe, int reg)
 {
@@ -267,8 +260,6 @@ int fixup_exception(struct pt_regs *regs, int trapnr, unsigned long error_code,
                 return ex_handler_fault(e, regs, trapnr);
         case EX_TYPE_UACCESS:
                 return ex_handler_uaccess(e, regs, trapnr, fault_addr);
-        case EX_TYPE_COPY:
-                return ex_handler_copy(e, regs, trapnr);
         case EX_TYPE_CLEAR_FS:
                 return ex_handler_clear_fs(e, regs);
         case EX_TYPE_FPU_RESTORE:
@@ -392,9 +392,9 @@ static void cond_mitigation(struct task_struct *next)
         prev_mm = this_cpu_read(cpu_tlbstate.last_user_mm_spec);
 
         /*
-         * Avoid user/user BTB poisoning by flushing the branch predictor
-         * when switching between processes. This stops one process from
-         * doing Spectre-v2 attacks on another.
+         * Avoid user->user BTB/RSB poisoning by flushing them when switching
+         * between processes. This stops one process from doing Spectre-v2
+         * attacks on another.
          *
          * Both, the conditional and the always IBPB mode use the mm
          * pointer to avoid the IBPB when switching between tasks of the
@@ -100,7 +100,12 @@ SYM_CODE_START_LOCAL(pvh_start_xen)
         xor %edx, %edx
         wrmsr
 
-        call xen_prepare_pvh
+        /* Call xen_prepare_pvh() via the kernel virtual mapping */
+        leaq xen_prepare_pvh(%rip), %rax
+        subq phys_base(%rip), %rax
+        addq $__START_KERNEL_map, %rax
+        ANNOTATE_RETPOLINE_SAFE
+        call *%rax
 
         /* startup_64 expects boot_params in %rsi. */
         mov $_pa(pvh_bootparams), %rsi
@@ -17,10 +17,10 @@
 #include <crypto/internal/skcipher.h>
 #include <linux/init.h>
 #include <linux/module.h>
-#include <linux/mm.h>
+#include <linux/spinlock.h>
 #include <linux/string.h>
 
-static DEFINE_MUTEX(crypto_default_null_skcipher_lock);
+static DEFINE_SPINLOCK(crypto_default_null_skcipher_lock);
 static struct crypto_sync_skcipher *crypto_default_null_skcipher;
 static int crypto_default_null_skcipher_refcnt;
 
@@ -152,23 +152,32 @@ MODULE_ALIAS_CRYPTO("cipher_null");
 
 struct crypto_sync_skcipher *crypto_get_default_null_skcipher(void)
 {
+        struct crypto_sync_skcipher *ntfm = NULL;
         struct crypto_sync_skcipher *tfm;
 
-        mutex_lock(&crypto_default_null_skcipher_lock);
+        spin_lock_bh(&crypto_default_null_skcipher_lock);
         tfm = crypto_default_null_skcipher;
 
         if (!tfm) {
-                tfm = crypto_alloc_sync_skcipher("ecb(cipher_null)", 0, 0);
-                if (IS_ERR(tfm))
-                        goto unlock;
+                spin_unlock_bh(&crypto_default_null_skcipher_lock);
 
-                crypto_default_null_skcipher = tfm;
+                ntfm = crypto_alloc_sync_skcipher("ecb(cipher_null)", 0, 0);
+                if (IS_ERR(ntfm))
+                        return ntfm;
+
+                spin_lock_bh(&crypto_default_null_skcipher_lock);
+                tfm = crypto_default_null_skcipher;
+                if (!tfm) {
+                        tfm = ntfm;
+                        ntfm = NULL;
+                        crypto_default_null_skcipher = tfm;
+                }
         }
 
         crypto_default_null_skcipher_refcnt++;
+        spin_unlock_bh(&crypto_default_null_skcipher_lock);
 
-unlock:
-        mutex_unlock(&crypto_default_null_skcipher_lock);
+        crypto_free_sync_skcipher(ntfm);
 
         return tfm;
 }
@@ -176,12 +185,16 @@ EXPORT_SYMBOL_GPL(crypto_get_default_null_skcipher);
 
 void crypto_put_default_null_skcipher(void)
 {
-        mutex_lock(&crypto_default_null_skcipher_lock);
+        struct crypto_sync_skcipher *tfm = NULL;
+
+        spin_lock_bh(&crypto_default_null_skcipher_lock);
         if (!--crypto_default_null_skcipher_refcnt) {
-                crypto_free_sync_skcipher(crypto_default_null_skcipher);
+                tfm = crypto_default_null_skcipher;
                 crypto_default_null_skcipher = NULL;
         }
-        mutex_unlock(&crypto_default_null_skcipher_lock);
+        spin_unlock_bh(&crypto_default_null_skcipher_lock);
+
+        crypto_free_sync_skcipher(tfm);
 }
 EXPORT_SYMBOL_GPL(crypto_put_default_null_skcipher);
 
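Note: the crypto_null rework above is an instance of the classic "allocate
outside the lock, then re-check" pattern: a sleeping allocation must not
happen under a spinlock, so the lock is dropped for the allocation and the
loser of any resulting race frees its copy. A generic sketch of the pattern
(struct obj and the globals here are placeholders, not the crypto API):

        static DEFINE_SPINLOCK(obj_lock);
        static struct obj *shared_obj;

        static struct obj *get_shared_obj(void)
        {
                struct obj *nobj = NULL, *o;

                spin_lock_bh(&obj_lock);
                o = shared_obj;
                if (!o) {
                        /* cannot sleep under the spinlock: drop it to allocate */
                        spin_unlock_bh(&obj_lock);

                        nobj = kzalloc(sizeof(*nobj), GFP_KERNEL);
                        if (!nobj)
                                return NULL;

                        /* retake the lock and re-check for a racing installer */
                        spin_lock_bh(&obj_lock);
                        o = shared_obj;
                        if (!o) {
                                shared_obj = o = nobj;
                                nobj = NULL;    /* ownership transferred */
                        }
                }
                spin_unlock_bh(&obj_lock);

                kfree(nobj);    /* frees the losing allocation; no-op on NULL */
                return o;
        }

The same inversion shows up in the put path above: the object is unpublished
under the lock but only freed after the lock is dropped.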
@@ -2301,6 +2301,34 @@ static const struct dmi_system_id acpi_ec_no_wakeup[] = {
                         DMI_MATCH(DMI_PRODUCT_FAMILY, "103C_5336AN HP ZHAN 66 Pro"),
                 },
         },
+        /*
+         * Lenovo Legion Go S; touchscreen blocks HW sleep when woken up from EC
+         * https://gitlab.freedesktop.org/drm/amd/-/issues/3929
+         */
+        {
+                .matches = {
+                        DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+                        DMI_MATCH(DMI_PRODUCT_NAME, "83L3"),
+                }
+        },
+        {
+                .matches = {
+                        DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+                        DMI_MATCH(DMI_PRODUCT_NAME, "83N6"),
+                }
+        },
+        {
+                .matches = {
+                        DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+                        DMI_MATCH(DMI_PRODUCT_NAME, "83Q2"),
+                }
+        },
+        {
+                .matches = {
+                        DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+                        DMI_MATCH(DMI_PRODUCT_NAME, "83Q3"),
+                }
+        },
         { },
 };
 
@@ -229,7 +229,7 @@ static int acpi_pptt_leaf_node(struct acpi_table_header *table_hdr,
         node_entry = ACPI_PTR_DIFF(node, table_hdr);
         entry = ACPI_ADD_PTR(struct acpi_subtable_header, table_hdr,
                              sizeof(struct acpi_table_pptt));
-        proc_sz = sizeof(struct acpi_pptt_processor *);
+        proc_sz = sizeof(struct acpi_pptt_processor);
 
         while ((unsigned long)entry + proc_sz < table_end) {
                 cpu_node = (struct acpi_pptt_processor *)entry;
@@ -270,7 +270,7 @@ static struct acpi_pptt_processor *acpi_find_processor_node(struct acpi_table_he
         table_end = (unsigned long)table_hdr + table_hdr->length;
         entry = ACPI_ADD_PTR(struct acpi_subtable_header, table_hdr,
                              sizeof(struct acpi_table_pptt));
-        proc_sz = sizeof(struct acpi_pptt_processor *);
+        proc_sz = sizeof(struct acpi_pptt_processor);
 
         /* find the processor structure associated with this cpuid */
         while ((unsigned long)entry + proc_sz < table_end) {
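Note: both PPTT hunks above fix the same off-by-sizeof bug: the bounds check
used sizeof(struct acpi_pptt_processor *), the size of a pointer, where the
size of the structure itself was intended. A standalone toy illustration of
the difference (generic userspace C, not kernel code):

        #include <stdio.h>

        struct entry { unsigned char type, length; unsigned int flags; };

        int main(void)
        {
                /* pointer size (8 on 64-bit) vs. actual structure size */
                printf("sizeof(struct entry *) = %zu\n", sizeof(struct entry *));
                printf("sizeof(struct entry)   = %zu\n", sizeof(struct entry));
                return 0;
        }

Under-counting the required space lets a table walk accept a truncated entry
near the end of the table.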
@@ -2354,8 +2354,8 @@ static unsigned int ata_msense_control_ata_feature(struct ata_device *dev,
          */
         put_unaligned_be16(ATA_FEATURE_SUB_MPAGE_LEN - 4, &buf[2]);
 
-        if (dev->flags & ATA_DFLAG_CDL)
-                buf[4] = 0x02; /* Support T2A and T2B pages */
+        if (dev->flags & ATA_DFLAG_CDL_ENABLED)
+                buf[4] = 0x02; /* T2A and T2B pages enabled */
         else
                 buf[4] = 0;
 
@@ -3764,12 +3764,11 @@ static int ata_mselect_control_spg0(struct ata_queued_cmd *qc,
 }
 
 /*
- * Translate MODE SELECT control mode page, sub-pages f2h (ATA feature mode
+ * Translate MODE SELECT control mode page, sub-page f2h (ATA feature mode
  * page) into a SET FEATURES command.
  */
-static unsigned int ata_mselect_control_ata_feature(struct ata_queued_cmd *qc,
-                                                    const u8 *buf, int len,
-                                                    u16 *fp)
+static int ata_mselect_control_ata_feature(struct ata_queued_cmd *qc,
+                                           const u8 *buf, int len, u16 *fp)
 {
         struct ata_device *dev = qc->dev;
         struct ata_taskfile *tf = &qc->tf;
@@ -3787,17 +3786,27 @@ static int ata_mselect_control_ata_feature(struct ata_queued_cmd *qc,
         /* Check cdl_ctrl */
         switch (buf[0] & 0x03) {
         case 0:
-                /* Disable CDL */
+                /* Disable CDL if it is enabled */
+                if (!(dev->flags & ATA_DFLAG_CDL_ENABLED))
+                        return 0;
+                ata_dev_dbg(dev, "Disabling CDL\n");
                 cdl_action = 0;
                 dev->flags &= ~ATA_DFLAG_CDL_ENABLED;
                 break;
         case 0x02:
-                /* Enable CDL T2A/T2B: NCQ priority must be disabled */
+                /*
+                 * Enable CDL if not already enabled. Since this is mutually
+                 * exclusive with NCQ priority, allow this only if NCQ priority
+                 * is disabled.
+                 */
+                if (dev->flags & ATA_DFLAG_CDL_ENABLED)
+                        return 0;
                 if (dev->flags & ATA_DFLAG_NCQ_PRIO_ENABLED) {
                         ata_dev_err(dev,
                                 "NCQ priority must be disabled to enable CDL\n");
                         return -EINVAL;
                 }
+                ata_dev_dbg(dev, "Enabling CDL\n");
                 cdl_action = 1;
                 dev->flags |= ATA_DFLAG_CDL_ENABLED;
                 break;
@@ -313,13 +313,13 @@ static int hd44780_probe(struct platform_device *pdev)
 fail3:
         kfree(hd);
 fail2:
-        kfree(lcd);
+        charlcd_free(lcd);
 fail1:
         kfree(hdc);
         return ret;
 }
 
-static int hd44780_remove(struct platform_device *pdev)
+static void hd44780_remove(struct platform_device *pdev)
 {
         struct charlcd *lcd = platform_get_drvdata(pdev);
         struct hd44780_common *hdc = lcd->drvdata;
@@ -328,8 +328,7 @@ static int hd44780_remove(struct platform_device *pdev)
         kfree(hdc->hd44780);
         kfree(lcd->drvdata);
 
-        kfree(lcd);
-        return 0;
+        charlcd_free(lcd);
 }
 
 static const struct of_device_id hd44780_of_match[] = {
@@ -340,7 +339,7 @@ MODULE_DEVICE_TABLE(of, hd44780_of_match);
 
 static struct platform_driver hd44780_driver = {
         .probe = hd44780_probe,
-        .remove = hd44780_remove,
+        .remove_new = hd44780_remove,
         .driver = {
                 .name = "hd44780",
                 .of_match_table = hd44780_of_match,
@@ -73,6 +73,7 @@ static inline void subsys_put(struct subsys_private *sp)
                 kset_put(&sp->subsys);
 }
 
+struct subsys_private *bus_to_subsys(const struct bus_type *bus);
 struct subsys_private *class_to_subsys(const struct class *class);
 
 struct driver_private {
@@ -179,6 +180,22 @@ int driver_add_groups(struct device_driver *drv, const struct attribute_group **
 void driver_remove_groups(struct device_driver *drv, const struct attribute_group **groups);
 void device_driver_detach(struct device *dev);
 
+static inline void device_set_driver(struct device *dev, const struct device_driver *drv)
+{
+        /*
+         * Majority (all?) read accesses to dev->driver happens either
+         * while holding device lock or in bus/driver code that is only
+         * invoked when the device is bound to a driver and there is no
+         * concern of the pointer being changed while it is being read.
+         * However when reading device's uevent file we read driver pointer
+         * without taking device lock (so we do not block there for
+         * arbitrary amount of time). We use WRITE_ONCE() here to prevent
+         * tearing so that READ_ONCE() can safely be used in uevent code.
+         */
+        // FIXME - this cast should not be needed "soon"
+        WRITE_ONCE(dev->driver, (struct device_driver *)drv);
+}
+
 int devres_release_all(struct device *dev);
 void device_block_probing(void);
 void device_unblock_probing(void);
@@ -57,7 +57,7 @@ static int __must_check bus_rescan_devices_helper(struct device *dev,
  * NULL. A call to subsys_put() must be done when finished with the pointer in
  * order for it to be properly freed.
  */
-static struct subsys_private *bus_to_subsys(const struct bus_type *bus)
+struct subsys_private *bus_to_subsys(const struct bus_type *bus)
 {
         struct subsys_private *sp = NULL;
         struct kobject *kobj;
@@ -2570,6 +2570,35 @@ static const char *dev_uevent_name(const struct kobject *kobj)
         return NULL;
 }
 
+/*
+ * Try filling "DRIVER=<name>" uevent variable for a device. Because this
+ * function may race with binding and unbinding the device from a driver,
+ * we need to be careful. Binding is generally safe, at worst we miss the
+ * fact that the device is already bound to a driver (but the driver
+ * information that is delivered through uevents is best-effort, it may
+ * become obsolete as soon as it is generated anyways). Unbinding is more
+ * risky as driver pointer is transitioning to NULL, so READ_ONCE() should
+ * be used to make sure we are dealing with the same pointer, and to
+ * ensure that driver structure is not going to disappear from under us
+ * we take bus' drivers klist lock. The assumption that only registered
+ * driver can be bound to a device, and to unregister a driver bus code
+ * will take the same lock.
+ */
+static void dev_driver_uevent(const struct device *dev, struct kobj_uevent_env *env)
+{
+        struct subsys_private *sp = bus_to_subsys(dev->bus);
+
+        if (sp) {
+                scoped_guard(spinlock, &sp->klist_drivers.k_lock) {
+                        struct device_driver *drv = READ_ONCE(dev->driver);
+                        if (drv)
+                                add_uevent_var(env, "DRIVER=%s", drv->name);
+                }
+
+                subsys_put(sp);
+        }
+}
+
 static int dev_uevent(const struct kobject *kobj, struct kobj_uevent_env *env)
 {
         const struct device *dev = kobj_to_dev(kobj);
@@ -2601,8 +2630,8 @@ static int dev_uevent(const struct kobject *kobj, struct kobj_uevent_env *env)
         if (dev->type && dev->type->name)
                 add_uevent_var(env, "DEVTYPE=%s", dev->type->name);
 
-        if (dev->driver)
-                add_uevent_var(env, "DRIVER=%s", dev->driver->name);
+        /* Add "DRIVER=%s" variable if the device is bound to a driver */
+        dev_driver_uevent(dev, env);
 
         /* Add common DT information about the device */
         of_device_uevent(dev, env);
@@ -2672,11 +2701,8 @@ static ssize_t uevent_show(struct device *dev, struct device_attribute *attr,
         if (!env)
                 return -ENOMEM;
 
-        /* Synchronize with really_probe() */
-        device_lock(dev);
         /* let the kset specific function add its keys */
         retval = kset->uevent_ops->uevent(&dev->kobj, env);
-        device_unlock(dev);
         if (retval)
                 goto out;
 
@@ -3691,7 +3717,7 @@ done:
         device_pm_remove(dev);
         dpm_sysfs_remove(dev);
  DPMError:
-        dev->driver = NULL;
+        device_set_driver(dev, NULL);
         bus_remove_device(dev);
  BusError:
         device_remove_attrs(dev);
@@ -550,7 +550,7 @@ static void device_unbind_cleanup(struct device *dev)
         arch_teardown_dma_ops(dev);
         kfree(dev->dma_range_map);
         dev->dma_range_map = NULL;
-        dev->driver = NULL;
+        device_set_driver(dev, NULL);
         dev_set_drvdata(dev, NULL);
         if (dev->pm_domain && dev->pm_domain->dismiss)
                 dev->pm_domain->dismiss(dev);
@@ -629,7 +629,7 @@ static int really_probe(struct device *dev, struct device_driver *drv)
         }
 
 re_probe:
-        dev->driver = drv;
+        device_set_driver(dev, drv);
 
         /* If using pinctrl, bind pins now before probing */
         ret = pinctrl_bind_pins(dev);
@@ -1014,7 +1014,7 @@ static int __device_attach(struct device *dev, bool allow_async)
                         if (ret == 0)
                                 ret = 1;
                         else {
-                                dev->driver = NULL;
+                                device_set_driver(dev, NULL);
                                 ret = 0;
                         }
                 } else {
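Note: the driver-core series above funnels all writes of dev->driver through
device_set_driver() so the store is a WRITE_ONCE(), pairing with the
READ_ONCE() in dev_driver_uevent(). A minimal sketch of the pairing (function
names below are hypothetical; the point is the tearing-free publish/read):

        static void publish_driver(struct device *dev, struct device_driver *drv)
        {
                WRITE_ONCE(dev->driver, drv);   /* store cannot tear */
        }

        static const char *peek_driver_name(const struct device *dev)
        {
                /* lockless read: sees old or new pointer, never a torn mix */
                struct device_driver *drv = READ_ONCE(dev->driver);

                return drv ? drv->name : NULL;  /* may go stale immediately */
        }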
@@ -441,7 +441,7 @@ static int lo_rw_aio(struct loop_device *lo, struct loop_cmd *cmd,
         cmd->iocb.ki_filp = file;
         cmd->iocb.ki_complete = lo_rw_aio_complete;
         cmd->iocb.ki_flags = IOCB_DIRECT;
-        cmd->iocb.ki_ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_NONE, 0);
+        cmd->iocb.ki_ioprio = req_get_ioprio(rq);
 
         if (rw == ITER_SOURCE)
                 ret = call_write_iter(file, &cmd->iocb, &iter);
@@ -315,7 +315,7 @@ static int __init misc_init(void)
                 goto fail_remove;
 
         err = -EIO;
-        if (register_chrdev(MISC_MAJOR, "misc", &misc_fops))
+        if (__register_chrdev(MISC_MAJOR, 0, MINORMASK + 1, "misc", &misc_fops))
                 goto fail_printk;
         return 0;
 
@@ -1612,8 +1612,8 @@ static void handle_control_message(struct virtio_device *vdev,
                 break;
         case VIRTIO_CONSOLE_RESIZE: {
                 struct {
-                        __u16 rows;
-                        __u16 cols;
+                        __virtio16 rows;
+                        __virtio16 cols;
                 } size;
 
                 if (!is_console_port(port))
@@ -1621,7 +1621,8 @@ static void handle_control_message(struct virtio_device *vdev,
 
                 memcpy(&size, buf->buf + buf->offset + sizeof(*cpkt),
                        sizeof(size));
-                set_console_size(port, size.rows, size.cols);
+                set_console_size(port, virtio16_to_cpu(vdev, size.rows),
+                                 virtio16_to_cpu(vdev, size.cols));
 
                 port->cons.hvc->irq_requested = 1;
                 resize_console(port);
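Note: virtio config fields are device-endian on legacy devices and
little-endian on VIRTIO 1.0+ devices, so reading the resize payload as raw
__u16 breaks on big-endian guests. virtio16_to_cpu(vdev, val) picks the right
conversion per device, which is what the hunk above switches to; a minimal
usage sketch (the helper name is illustrative):

        static u16 console_rows(struct virtio_device *vdev, __virtio16 raw)
        {
                return virtio16_to_cpu(vdev, raw);      /* per-device endianness */
        }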
@@ -5216,6 +5216,10 @@ of_clk_get_hw_from_clkspec(struct of_phandle_args *clkspec)
         if (!clkspec)
                 return ERR_PTR(-EINVAL);
 
+        /* Check if node in clkspec is in disabled/fail state */
+        if (!of_device_is_available(clkspec->np))
+                return ERR_PTR(-ENOENT);
+
         mutex_lock(&of_clk_mutex);
         list_for_each_entry(provider, &of_clk_providers, link) {
                 if (provider->node == clkspec->np) {
@@ -14,6 +14,17 @@
 
 #include "rzg2l-cpg.h"
 
+/* Specific registers. */
+#define CPG_PL2SDHI_DSEL        (0x218)
+
+/* Clock select configuration. */
+#define SEL_SDHI0               SEL_PLL_PACK(CPG_PL2SDHI_DSEL, 0, 2)
+#define SEL_SDHI1               SEL_PLL_PACK(CPG_PL2SDHI_DSEL, 4, 2)
+
+/* Clock status configuration. */
+#define SEL_SDHI0_STS           SEL_PLL_PACK(CPG_CLKSTATUS, 28, 1)
+#define SEL_SDHI1_STS           SEL_PLL_PACK(CPG_CLKSTATUS, 29, 1)
+
 enum clk_ids {
         /* Core Clock Outputs exported to DT */
         LAST_DT_CORE_CLK = R9A07G043_CLK_P0_DIV2,
@@ -75,8 +86,12 @@ static const struct clk_div_table dtable_1_32[] = {
 
 /* Mux clock tables */
 static const char * const sel_pll3_3[] = { ".pll3_533", ".pll3_400" };
+#ifdef CONFIG_ARM64
 static const char * const sel_pll6_2[] = { ".pll6_250", ".pll5_250" };
-static const char * const sel_shdi[] = { ".clk_533", ".clk_400", ".clk_266" };
+#endif
+static const char * const sel_sdhi[] = { ".clk_533", ".clk_400", ".clk_266" };
+
+static const u32 mtable_sdhi[] = { 1, 2, 3 };
 
 static const struct cpg_core_clk r9a07g043_core_clks[] __initconst = {
         /* External Clock Inputs */
@@ -120,11 +135,18 @@ static const struct cpg_core_clk r9a07g043_core_clks[] __initconst = {
         DEF_DIV("P2", R9A07G043_CLK_P2, CLK_PLL3_DIV2_4_2, DIVPL3A, dtable_1_32),
         DEF_FIXED("M0", R9A07G043_CLK_M0, CLK_PLL3_DIV2_4, 1, 1),
         DEF_FIXED("ZT", R9A07G043_CLK_ZT, CLK_PLL3_DIV2_4_2, 1, 1),
+#ifdef CONFIG_ARM64
         DEF_MUX("HP", R9A07G043_CLK_HP, SEL_PLL6_2, sel_pll6_2),
+#endif
+#ifdef CONFIG_RISCV
+        DEF_FIXED("HP", R9A07G043_CLK_HP, CLK_PLL6_250, 1, 1),
+#endif
         DEF_FIXED("SPI0", R9A07G043_CLK_SPI0, CLK_DIV_PLL3_C, 1, 2),
         DEF_FIXED("SPI1", R9A07G043_CLK_SPI1, CLK_DIV_PLL3_C, 1, 4),
-        DEF_SD_MUX("SD0", R9A07G043_CLK_SD0, SEL_SDHI0, sel_shdi),
-        DEF_SD_MUX("SD1", R9A07G043_CLK_SD1, SEL_SDHI1, sel_shdi),
+        DEF_SD_MUX("SD0", R9A07G043_CLK_SD0, SEL_SDHI0, SEL_SDHI0_STS, sel_sdhi,
+                   mtable_sdhi, 0, rzg2l_cpg_sd_clk_mux_notifier),
+        DEF_SD_MUX("SD1", R9A07G043_CLK_SD1, SEL_SDHI1, SEL_SDHI1_STS, sel_sdhi,
+                   mtable_sdhi, 0, rzg2l_cpg_sd_clk_mux_notifier),
         DEF_FIXED("SD0_DIV4", CLK_SD0_DIV4, R9A07G043_CLK_SD0, 1, 4),
         DEF_FIXED("SD1_DIV4", CLK_SD1_DIV4, R9A07G043_CLK_SD1, 1, 4),
 };
@@ -15,6 +15,17 @@
 
 #include "rzg2l-cpg.h"
 
+/* Specific registers. */
+#define CPG_PL2SDHI_DSEL        (0x218)
+
+/* Clock select configuration. */
+#define SEL_SDHI0               SEL_PLL_PACK(CPG_PL2SDHI_DSEL, 0, 2)
+#define SEL_SDHI1               SEL_PLL_PACK(CPG_PL2SDHI_DSEL, 4, 2)
+
+/* Clock status configuration. */
+#define SEL_SDHI0_STS           SEL_PLL_PACK(CPG_CLKSTATUS, 28, 1)
+#define SEL_SDHI1_STS           SEL_PLL_PACK(CPG_CLKSTATUS, 29, 1)
+
 enum clk_ids {
         /* Core Clock Outputs exported to DT */
         LAST_DT_CORE_CLK = R9A07G054_CLK_DRP_A,
@@ -95,9 +106,11 @@ static const struct clk_div_table dtable_16_128[] = {
 static const char * const sel_pll3_3[] = { ".pll3_533", ".pll3_400" };
 static const char * const sel_pll5_4[] = { ".pll5_foutpostdiv", ".pll5_fout1ph0" };
 static const char * const sel_pll6_2[] = { ".pll6_250", ".pll5_250" };
-static const char * const sel_shdi[] = { ".clk_533", ".clk_400", ".clk_266" };
+static const char * const sel_sdhi[] = { ".clk_533", ".clk_400", ".clk_266" };
 static const char * const sel_gpu2[] = { ".pll6", ".pll3_div2_2" };
 
+static const u32 mtable_sdhi[] = { 1, 2, 3 };
+
 static const struct {
         struct cpg_core_clk common[56];
 #ifdef CONFIG_CLK_R9A07G054
@@ -163,8 +176,10 @@ static const struct {
                 DEF_MUX("HP", R9A07G044_CLK_HP, SEL_PLL6_2, sel_pll6_2),
                 DEF_FIXED("SPI0", R9A07G044_CLK_SPI0, CLK_DIV_PLL3_C, 1, 2),
                 DEF_FIXED("SPI1", R9A07G044_CLK_SPI1, CLK_DIV_PLL3_C, 1, 4),
-                DEF_SD_MUX("SD0", R9A07G044_CLK_SD0, SEL_SDHI0, sel_shdi),
-                DEF_SD_MUX("SD1", R9A07G044_CLK_SD1, SEL_SDHI1, sel_shdi),
+                DEF_SD_MUX("SD0", R9A07G044_CLK_SD0, SEL_SDHI0, SEL_SDHI0_STS, sel_sdhi,
+                           mtable_sdhi, 0, rzg2l_cpg_sd_clk_mux_notifier),
+                DEF_SD_MUX("SD1", R9A07G044_CLK_SD1, SEL_SDHI1, SEL_SDHI1_STS, sel_sdhi,
+                           mtable_sdhi, 0, rzg2l_cpg_sd_clk_mux_notifier),
                 DEF_FIXED("SD0_DIV4", CLK_SD0_DIV4, R9A07G044_CLK_SD0, 1, 4),
                 DEF_FIXED("SD1_DIV4", CLK_SD1_DIV4, R9A07G044_CLK_SD1, 1, 4),
                 DEF_DIV("G", R9A07G044_CLK_G, CLK_SEL_GPU2, DIVGPU, dtable_1_8),
@@ -56,15 +56,37 @@
 #define GET_REG_SAMPLL_CLK1(val)        ((val >> 22) & 0xfff)
 #define GET_REG_SAMPLL_CLK2(val)        ((val >> 12) & 0xfff)
 
+#define CPG_WEN_BIT             BIT(16)
+
 #define MAX_VCLK_FREQ           (148500000)
 
-struct sd_hw_data {
+/**
+ * struct clk_hw_data - clock hardware data
+ * @hw: clock hw
+ * @conf: clock configuration (register offset, shift, width)
+ * @sconf: clock status configuration (register offset, shift, width)
+ * @priv: CPG private data structure
+ */
+struct clk_hw_data {
         struct clk_hw hw;
         u32 conf;
+        u32 sconf;
         struct rzg2l_cpg_priv *priv;
 };
 
-#define to_sd_hw_data(_hw)      container_of(_hw, struct sd_hw_data, hw)
+#define to_clk_hw_data(_hw)     container_of(_hw, struct clk_hw_data, hw)
+
+/**
+ * struct sd_mux_hw_data - SD MUX clock hardware data
+ * @hw_data: clock hw data
+ * @mtable: clock mux table
+ */
+struct sd_mux_hw_data {
+        struct clk_hw_data hw_data;
+        const u32 *mtable;
+};
+
+#define to_sd_mux_hw_data(_hw)  container_of(_hw, struct sd_mux_hw_data, hw_data)
 
 struct rzg2l_pll5_param {
         u32 pl5_fracin;
@@ -121,6 +143,76 @@ static void rzg2l_cpg_del_clk_provider(void *data)
         of_clk_del_provider(data);
 }
 
+/* Must be called in atomic context. */
+static int rzg2l_cpg_wait_clk_update_done(void __iomem *base, u32 conf)
+{
+        u32 bitmask = GENMASK(GET_WIDTH(conf) - 1, 0) << GET_SHIFT(conf);
+        u32 off = GET_REG_OFFSET(conf);
+        u32 val;
+
+        return readl_poll_timeout_atomic(base + off, val, !(val & bitmask), 10, 200);
+}
+
+int rzg2l_cpg_sd_clk_mux_notifier(struct notifier_block *nb, unsigned long event,
+                                  void *data)
+{
+        struct clk_notifier_data *cnd = data;
+        struct clk_hw *hw = __clk_get_hw(cnd->clk);
+        struct clk_hw_data *clk_hw_data = to_clk_hw_data(hw);
+        struct rzg2l_cpg_priv *priv = clk_hw_data->priv;
+        u32 off = GET_REG_OFFSET(clk_hw_data->conf);
+        u32 shift = GET_SHIFT(clk_hw_data->conf);
+        const u32 clk_src_266 = 3;
+        unsigned long flags;
+        int ret;
+
+        if (event != PRE_RATE_CHANGE || (cnd->new_rate / MEGA == 266))
+                return NOTIFY_DONE;
+
+        spin_lock_irqsave(&priv->rmw_lock, flags);
+
+        /*
+         * As per the HW manual, we should not directly switch from 533 MHz to
+         * 400 MHz and vice versa. To change the setting from 2’b01 (533 MHz)
+         * to 2’b10 (400 MHz) or vice versa, Switch to 2’b11 (266 MHz) first,
+         * and then switch to the target setting (2’b01 (533 MHz) or 2’b10
+         * (400 MHz)).
+         * Setting a value of '0' to the SEL_SDHI0_SET or SEL_SDHI1_SET clock
+         * switching register is prohibited.
+         * The clock mux has 3 input clocks(533 MHz, 400 MHz, and 266 MHz), and
+         * the index to value mapping is done by adding 1 to the index.
+         */
+
+        writel((CPG_WEN_BIT | clk_src_266) << shift, priv->base + off);
+
+        /* Wait for the update done. */
+        ret = rzg2l_cpg_wait_clk_update_done(priv->base, clk_hw_data->sconf);
+
+        spin_unlock_irqrestore(&priv->rmw_lock, flags);
+
+        if (ret)
+                dev_err(priv->dev, "failed to switch to safe clk source\n");
+
+        return notifier_from_errno(ret);
+}
+
+static int rzg2l_register_notifier(struct clk_hw *hw, const struct cpg_core_clk *core,
+                                   struct rzg2l_cpg_priv *priv)
+{
+        struct notifier_block *nb;
+
+        if (!core->notifier)
+                return 0;
+
+        nb = devm_kzalloc(priv->dev, sizeof(*nb), GFP_KERNEL);
+        if (!nb)
+                return -ENOMEM;
+
+        nb->notifier_call = core->notifier;
+
+        return clk_notifier_register(hw->clk, nb);
+}
+
 static struct clk * __init
 rzg2l_cpg_div_clk_register(const struct cpg_core_clk *core,
                            struct clk **clks,
@@ -183,63 +275,44 @@ rzg2l_cpg_mux_clk_register(const struct cpg_core_clk *core,
 
 static int rzg2l_cpg_sd_clk_mux_set_parent(struct clk_hw *hw, u8 index)
 {
-        struct sd_hw_data *hwdata = to_sd_hw_data(hw);
-        struct rzg2l_cpg_priv *priv = hwdata->priv;
-        u32 off = GET_REG_OFFSET(hwdata->conf);
-        u32 shift = GET_SHIFT(hwdata->conf);
-        const u32 clk_src_266 = 2;
-        u32 msk, val, bitmask;
+        struct clk_hw_data *clk_hw_data = to_clk_hw_data(hw);
+        struct sd_mux_hw_data *sd_mux_hw_data = to_sd_mux_hw_data(clk_hw_data);
+        struct rzg2l_cpg_priv *priv = clk_hw_data->priv;
+        u32 off = GET_REG_OFFSET(clk_hw_data->conf);
+        u32 shift = GET_SHIFT(clk_hw_data->conf);
         unsigned long flags;
+        u32 val;
         int ret;
 
-        /*
-         * As per the HW manual, we should not directly switch from 533 MHz to
-         * 400 MHz and vice versa. To change the setting from 2’b01 (533 MHz)
-         * to 2’b10 (400 MHz) or vice versa, Switch to 2’b11 (266 MHz) first,
-         * and then switch to the target setting (2’b01 (533 MHz) or 2’b10
-         * (400 MHz)).
-         * Setting a value of '0' to the SEL_SDHI0_SET or SEL_SDHI1_SET clock
-         * switching register is prohibited.
-         * The clock mux has 3 input clocks(533 MHz, 400 MHz, and 266 MHz), and
-         * the index to value mapping is done by adding 1 to the index.
-         */
-        bitmask = (GENMASK(GET_WIDTH(hwdata->conf) - 1, 0) << shift) << 16;
-        msk = off ? CPG_CLKSTATUS_SELSDHI1_STS : CPG_CLKSTATUS_SELSDHI0_STS;
+        val = clk_mux_index_to_val(sd_mux_hw_data->mtable, CLK_MUX_ROUND_CLOSEST, index);
 
         spin_lock_irqsave(&priv->rmw_lock, flags);
-        if (index != clk_src_266) {
-                writel(bitmask | ((clk_src_266 + 1) << shift), priv->base + off);
-
-                ret = readl_poll_timeout_atomic(priv->base + CPG_CLKSTATUS, val,
-                                                !(val & msk), 10,
-                                                CPG_SDHI_CLK_SWITCH_STATUS_TIMEOUT_US);
-                if (ret)
-                        goto unlock;
-        }
+        writel((CPG_WEN_BIT | val) << shift, priv->base + off);
 
-        writel(bitmask | ((index + 1) << shift), priv->base + off);
+        /* Wait for the update done. */
+        ret = rzg2l_cpg_wait_clk_update_done(priv->base, clk_hw_data->sconf);
 
-        ret = readl_poll_timeout_atomic(priv->base + CPG_CLKSTATUS, val,
-                                        !(val & msk), 10,
-                                        CPG_SDHI_CLK_SWITCH_STATUS_TIMEOUT_US);
-unlock:
         spin_unlock_irqrestore(&priv->rmw_lock, flags);
 
         if (ret)
-                dev_err(priv->dev, "failed to switch clk source\n");
+                dev_err(priv->dev, "Failed to switch parent\n");
 
         return ret;
 }
 
 static u8 rzg2l_cpg_sd_clk_mux_get_parent(struct clk_hw *hw)
 {
-        struct sd_hw_data *hwdata = to_sd_hw_data(hw);
-        struct rzg2l_cpg_priv *priv = hwdata->priv;
-        u32 val = readl(priv->base + GET_REG_OFFSET(hwdata->conf));
+        struct clk_hw_data *clk_hw_data = to_clk_hw_data(hw);
+        struct sd_mux_hw_data *sd_mux_hw_data = to_sd_mux_hw_data(clk_hw_data);
+        struct rzg2l_cpg_priv *priv = clk_hw_data->priv;
+        u32 val;
 
-        val >>= GET_SHIFT(hwdata->conf);
-        val &= GENMASK(GET_WIDTH(hwdata->conf) - 1, 0);
+        val = readl(priv->base + GET_REG_OFFSET(clk_hw_data->conf));
+        val >>= GET_SHIFT(clk_hw_data->conf);
+        val &= GENMASK(GET_WIDTH(clk_hw_data->conf) - 1, 0);
 
-        return val ? val - 1 : 0;
+        return clk_mux_val_to_index(hw, sd_mux_hw_data->mtable, CLK_MUX_ROUND_CLOSEST, val);
 }
 
 static const struct clk_ops rzg2l_cpg_sd_clk_mux_ops = {
@ -253,31 +326,40 @@ rzg2l_cpg_sd_mux_clk_register(const struct cpg_core_clk *core,
|
|||
void __iomem *base,
|
||||
struct rzg2l_cpg_priv *priv)
|
||||
{
|
||||
struct sd_hw_data *clk_hw_data;
|
||||
struct sd_mux_hw_data *sd_mux_hw_data;
|
||||
struct clk_init_data init;
|
||||
struct clk_hw *clk_hw;
|
||||
int ret;
|
||||
|
||||
clk_hw_data = devm_kzalloc(priv->dev, sizeof(*clk_hw_data), GFP_KERNEL);
|
||||
if (!clk_hw_data)
|
||||
sd_mux_hw_data = devm_kzalloc(priv->dev, sizeof(*sd_mux_hw_data), GFP_KERNEL);
|
||||
if (!sd_mux_hw_data)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
||||
clk_hw_data->priv = priv;
|
||||
clk_hw_data->conf = core->conf;
|
||||
sd_mux_hw_data->hw_data.priv = priv;
|
||||
sd_mux_hw_data->hw_data.conf = core->conf;
|
||||
sd_mux_hw_data->hw_data.sconf = core->sconf;
|
||||
sd_mux_hw_data->mtable = core->mtable;
|
||||
|
||||
init.name = GET_SHIFT(core->conf) ? "sd1" : "sd0";
|
||||
init.ops = &rzg2l_cpg_sd_clk_mux_ops;
|
||||
init.flags = 0;
|
||||
init.flags = core->flag;
|
||||
init.num_parents = core->num_parents;
|
||||
init.parent_names = core->parent_names;
|
||||
|
||||
clk_hw = &clk_hw_data->hw;
|
||||
clk_hw = &sd_mux_hw_data->hw_data.hw;
|
||||
clk_hw->init = &init;
|
||||
|
||||
ret = devm_clk_hw_register(priv->dev, clk_hw);
|
||||
if (ret)
|
||||
return ERR_PTR(ret);
|
||||
|
||||
ret = rzg2l_register_notifier(clk_hw, core, priv);
|
||||
if (ret) {
|
||||
dev_err(priv->dev, "Failed to register notifier for %s\n",
|
||||
core->name);
|
||||
return ERR_PTR(ret);
|
||||
}
|
||||
|
||||
return clk_hw->clk;
|
||||
}
|
||||
|
||||
|
|
|
@ -9,6 +9,8 @@
|
|||
#ifndef __RENESAS_RZG2L_CPG_H__
|
||||
#define __RENESAS_RZG2L_CPG_H__
|
||||
|
||||
#include <linux/notifier.h>
|
||||
|
||||
#define CPG_SIPLL5_STBY (0x140)
|
||||
#define CPG_SIPLL5_CLK1 (0x144)
|
||||
#define CPG_SIPLL5_CLK3 (0x14C)
|
||||
|
@ -19,7 +21,6 @@
|
|||
#define CPG_PL2_DDIV (0x204)
|
||||
#define CPG_PL3A_DDIV (0x208)
|
||||
#define CPG_PL6_DDIV (0x210)
|
||||
#define CPG_PL2SDHI_DSEL (0x218)
|
||||
#define CPG_CLKSTATUS (0x280)
|
||||
#define CPG_PL3_SSEL (0x408)
|
||||
#define CPG_PL6_SSEL (0x414)
|
||||
|
@ -43,8 +44,6 @@
|
|||
#define CPG_CLKSTATUS_SELSDHI0_STS BIT(28)
|
||||
#define CPG_CLKSTATUS_SELSDHI1_STS BIT(29)
|
||||
|
||||
#define CPG_SDHI_CLK_SWITCH_STATUS_TIMEOUT_US 200
|
||||
|
||||
/* n = 0/1/2 for PLL1/4/6 */
|
||||
#define CPG_SAMPLL_CLK1(n) (0x04 + (16 * n))
|
||||
#define CPG_SAMPLL_CLK2(n) (0x08 + (16 * n))
|
||||
|
@ -69,9 +68,6 @@
|
|||
#define SEL_PLL6_2 SEL_PLL_PACK(CPG_PL6_ETH_SSEL, 0, 1)
|
||||
#define SEL_GPU2 SEL_PLL_PACK(CPG_PL6_SSEL, 12, 1)
|
||||
|
||||
#define SEL_SDHI0 DDIV_PACK(CPG_PL2SDHI_DSEL, 0, 2)
|
||||
#define SEL_SDHI1 DDIV_PACK(CPG_PL2SDHI_DSEL, 4, 2)
|
||||
|
||||
#define EXTAL_FREQ_IN_MEGA_HZ (24)
|
||||
|
||||
/**
|
||||
|
@ -90,10 +86,13 @@ struct cpg_core_clk {
|
|||
unsigned int mult;
|
||||
unsigned int type;
|
||||
unsigned int conf;
|
||||
unsigned int sconf;
|
||||
const struct clk_div_table *dtable;
|
||||
const u32 *mtable;
|
||||
const char * const *parent_names;
|
||||
int flag;
|
||||
int mux_flags;
|
||||
notifier_fn_t notifier;
|
||||
u32 flag;
|
||||
u32 mux_flags;
|
||||
int num_parents;
|
||||
};
|
||||
|
||||
|
@ -151,10 +150,11 @@ enum clk_types {
|
|||
.parent_names = _parent_names, \
|
||||
.num_parents = ARRAY_SIZE(_parent_names), \
|
||||
.mux_flags = CLK_MUX_READ_ONLY)
|
||||
#define DEF_SD_MUX(_name, _id, _conf, _parent_names) \
|
||||
DEF_TYPE(_name, _id, CLK_TYPE_SD_MUX, .conf = _conf, \
|
||||
#define DEF_SD_MUX(_name, _id, _conf, _sconf, _parent_names, _mtable, _clk_flags, _notifier) \
|
||||
DEF_TYPE(_name, _id, CLK_TYPE_SD_MUX, .conf = _conf, .sconf = _sconf, \
|
||||
.parent_names = _parent_names, \
|
||||
.num_parents = ARRAY_SIZE(_parent_names))
|
||||
.num_parents = ARRAY_SIZE(_parent_names), \
|
||||
.mtable = _mtable, .flag = _clk_flags, .notifier = _notifier)
|
||||
#define DEF_PLL5_FOUTPOSTDIV(_name, _id, _parent) \
|
||||
DEF_TYPE(_name, _id, CLK_TYPE_SIPLL5, .parent = _parent)
|
||||
#define DEF_PLL5_4_MUX(_name, _id, _conf, _parent_names) \
|
||||
|
@ -273,4 +273,6 @@ extern const struct rzg2l_cpg_info r9a07g044_cpg_info;
|
|||
extern const struct rzg2l_cpg_info r9a07g054_cpg_info;
|
||||
extern const struct rzg2l_cpg_info r9a09g011_cpg_info;
|
||||
|
||||
int rzg2l_cpg_sd_clk_mux_notifier(struct notifier_block *nb, unsigned long event, void *data);
|
||||
|
||||
#endif
|
||||
|
|
|
@ -758,7 +758,7 @@ static void jr3_pci_detach(struct comedi_device *dev)
|
|||
struct jr3_pci_dev_private *devpriv = dev->private;
|
||||
|
||||
if (devpriv)
|
||||
del_timer_sync(&devpriv->timer);
|
||||
timer_shutdown_sync(&devpriv->timer);
|
||||
|
||||
comedi_pci_detach(dev);
|
||||
}
|
||||
|
|
|
@ -103,11 +103,17 @@ static const struct of_device_id apple_soc_cpufreq_of_match[] = {
|
|||
|
||||
static unsigned int apple_soc_cpufreq_get_rate(unsigned int cpu)
|
||||
{
|
||||
struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu);
|
||||
struct apple_cpu_priv *priv = policy->driver_data;
|
||||
struct cpufreq_policy *policy;
|
||||
struct apple_cpu_priv *priv;
|
||||
struct cpufreq_frequency_table *p;
|
||||
unsigned int pstate;
|
||||
|
||||
policy = cpufreq_cpu_get_raw(cpu);
|
||||
if (unlikely(!policy))
|
||||
return 0;
|
||||
|
||||
priv = policy->driver_data;
|
||||
|
||||
if (priv->info->cur_pstate_mask) {
|
||||
u64 reg = readq_relaxed(priv->reg_base + APPLE_DVFS_STATUS);
|
||||
|
||||
|
|
|
@ -773,7 +773,7 @@ static unsigned int cppc_cpufreq_get_rate(unsigned int cpu)
|
|||
int ret;
|
||||
|
||||
if (!policy)
|
||||
return -ENODEV;
|
||||
return 0;
|
||||
|
||||
cpu_data = policy->driver_data;
|
||||
|
||||
|
|
|
@ -33,11 +33,17 @@ static const struct scmi_perf_proto_ops *perf_ops;
|
|||
|
||||
static unsigned int scmi_cpufreq_get_rate(unsigned int cpu)
|
||||
{
|
||||
struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu);
|
||||
struct scmi_data *priv = policy->driver_data;
|
||||
struct cpufreq_policy *policy;
|
||||
struct scmi_data *priv;
|
||||
unsigned long rate;
|
||||
int ret;
|
||||
|
||||
policy = cpufreq_cpu_get_raw(cpu);
|
||||
if (unlikely(!policy))
|
||||
return 0;
|
||||
|
||||
priv = policy->driver_data;
|
||||
|
||||
ret = perf_ops->freq_get(ph, priv->domain_id, &rate, false);
|
||||
if (ret)
|
||||
return 0;
|
||||
|
|
|
@ -29,9 +29,16 @@ static struct scpi_ops *scpi_ops;
|
|||
|
||||
static unsigned int scpi_cpufreq_get_rate(unsigned int cpu)
|
||||
{
|
||||
struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu);
|
||||
struct scpi_data *priv = policy->driver_data;
|
||||
unsigned long rate = clk_get_rate(priv->clk);
|
||||
struct cpufreq_policy *policy;
|
||||
struct scpi_data *priv;
|
||||
unsigned long rate;
|
||||
|
||||
policy = cpufreq_cpu_get_raw(cpu);
|
||||
if (unlikely(!policy))
|
||||
return 0;
|
||||
|
||||
priv = policy->driver_data;
|
||||
rate = clk_get_rate(priv->clk);
|
||||
|
||||
return rate / 1000;
|
||||
}
|
||||
|
|
|
@ -107,6 +107,12 @@ static int atmel_sha204a_probe(struct i2c_client *client)
|
|||
i2c_priv->hwrng.name = dev_name(&client->dev);
|
||||
i2c_priv->hwrng.read = atmel_sha204a_rng_read;
|
||||
|
||||
/*
|
||||
* According to review by Bill Cox [1], this HWRNG has very low entropy.
|
||||
* [1] https://www.metzdowd.com/pipermail/cryptography/2014-December/023858.html
|
||||
*/
|
||||
i2c_priv->hwrng.quality = 1;
|
||||
|
||||
ret = devm_hwrng_register(&client->dev, &i2c_priv->hwrng);
|
||||
if (ret)
|
||||
dev_warn(&client->dev, "failed to register RNG (%d)\n", ret);
|
||||
|
|
|
@ -577,6 +577,7 @@ static const struct pci_device_id sp_pci_table[] = {
|
|||
{ PCI_VDEVICE(AMD, 0x14CA), (kernel_ulong_t)&dev_vdata[5] },
|
||||
{ PCI_VDEVICE(AMD, 0x15C7), (kernel_ulong_t)&dev_vdata[6] },
|
||||
{ PCI_VDEVICE(AMD, 0x1649), (kernel_ulong_t)&dev_vdata[6] },
|
||||
{ PCI_VDEVICE(AMD, 0x1134), (kernel_ulong_t)&dev_vdata[7] },
|
||||
{ PCI_VDEVICE(AMD, 0x17E0), (kernel_ulong_t)&dev_vdata[7] },
|
||||
{ PCI_VDEVICE(AMD, 0x156E), (kernel_ulong_t)&dev_vdata[8] },
|
||||
/* Last entry must be zero */
|
||||
|
|
|
@ -478,7 +478,6 @@ resource_size_t __rcrb_to_component(struct device *dev, struct cxl_rcrb_info *ri
|
|||
resource_size_t rcrb = ri->base;
|
||||
void __iomem *addr;
|
||||
u32 bar0, bar1;
|
||||
u16 cmd;
|
||||
u32 id;
|
||||
|
||||
if (which == CXL_RCRB_UPSTREAM)
|
||||
|
@ -500,7 +499,6 @@ resource_size_t __rcrb_to_component(struct device *dev, struct cxl_rcrb_info *ri
|
|||
}
|
||||
|
||||
id = readl(addr + PCI_VENDOR_ID);
|
||||
cmd = readw(addr + PCI_COMMAND);
|
||||
bar0 = readl(addr + PCI_BASE_ADDRESS_0);
|
||||
bar1 = readl(addr + PCI_BASE_ADDRESS_1);
|
||||
iounmap(addr);
|
||||
|
@ -515,8 +513,6 @@ resource_size_t __rcrb_to_component(struct device *dev, struct cxl_rcrb_info *ri
|
|||
dev_err(dev, "Failed to access Downstream Port RCRB\n");
|
||||
return CXL_RESOURCE_NONE;
|
||||
}
|
||||
if (!(cmd & PCI_COMMAND_MEMORY))
|
||||
return CXL_RESOURCE_NONE;
|
||||
/* The RCRB is a Memory Window, and the MEM_TYPE_1M bit is obsolete */
|
||||
if (bar0 & (PCI_BASE_ADDRESS_MEM_TYPE_1M | PCI_BASE_ADDRESS_SPACE_IO))
|
||||
return CXL_RESOURCE_NONE;
|
||||
|
|
|
@ -214,7 +214,7 @@ static long udmabuf_create(struct miscdevice *device,
|
|||
if (!ubuf)
|
||||
return -ENOMEM;
|
||||
|
||||
pglimit = (size_limit_mb * 1024 * 1024) >> PAGE_SHIFT;
|
||||
pglimit = ((u64)size_limit_mb * 1024 * 1024) >> PAGE_SHIFT;
|
||||
for (i = 0; i < head->count; i++) {
|
||||
if (!IS_ALIGNED(list[i].offset, PAGE_SIZE))
|
||||
goto err;
|
||||
|
|
|
@ -827,9 +827,9 @@ static int dmatest_func(void *data)
|
|||
} else {
|
||||
dma_async_issue_pending(chan);
|
||||
|
||||
wait_event_freezable_timeout(thread->done_wait,
|
||||
done->done,
|
||||
msecs_to_jiffies(params->timeout));
|
||||
wait_event_timeout(thread->done_wait,
|
||||
done->done,
|
||||
msecs_to_jiffies(params->timeout));
|
||||
|
||||
status = dma_async_is_tx_complete(chan, cookie, NULL,
|
||||
NULL);
|
||||
|
|
|
@ -247,6 +247,9 @@ static void of_gpio_set_polarity_by_property(const struct device_node *np,
|
|||
{ "fsl,imx8qm-fec", "phy-reset-gpios", "phy-reset-active-high" },
|
||||
{ "fsl,s32v234-fec", "phy-reset-gpios", "phy-reset-active-high" },
|
||||
#endif
|
||||
#if IS_ENABLED(CONFIG_MMC_ATMELMCI)
|
||||
{ "atmel,hsmci", "cd-gpios", "cd-inverted" },
|
||||
#endif
|
||||
#if IS_ENABLED(CONFIG_PCI_IMX6)
|
||||
{ "fsl,imx6q-pcie", "reset-gpio", "reset-gpio-active-high" },
|
||||
{ "fsl,imx6sx-pcie", "reset-gpio", "reset-gpio-active-high" },
|
||||
|
@ -272,9 +275,6 @@ static void of_gpio_set_polarity_by_property(const struct device_node *np,
|
|||
#if IS_ENABLED(CONFIG_REGULATOR_GPIO)
|
||||
{ "regulator-gpio", "enable-gpio", "enable-active-high" },
|
||||
{ "regulator-gpio", "enable-gpios", "enable-active-high" },
|
||||
#endif
|
||||
#if IS_ENABLED(CONFIG_MMC_ATMELMCI)
|
||||
{ "atmel,hsmci", "cd-gpios", "cd-inverted" },
|
||||
#endif
|
||||
};
|
||||
unsigned int i;
|
||||
|
|
|
@ -2789,16 +2789,16 @@ static void dm_gpureset_commit_state(struct dc_state *dc_state,
|
|||
for (k = 0; k < dc_state->stream_count; k++) {
|
||||
bundle->stream_update.stream = dc_state->streams[k];
|
||||
|
||||
for (m = 0; m < dc_state->stream_status->plane_count; m++) {
|
||||
for (m = 0; m < dc_state->stream_status[k].plane_count; m++) {
|
||||
bundle->surface_updates[m].surface =
|
||||
dc_state->stream_status->plane_states[m];
|
||||
dc_state->stream_status[k].plane_states[m];
|
||||
bundle->surface_updates[m].surface->force_full_update =
|
||||
true;
|
||||
}
|
||||
|
||||
update_planes_and_stream_adapter(dm->dc,
|
||||
UPDATE_TYPE_FULL,
|
||||
dc_state->stream_status->plane_count,
|
||||
dc_state->stream_status[k].plane_count,
|
||||
dc_state->streams[k],
|
||||
&bundle->stream_update,
|
||||
bundle->surface_updates);
|
||||
|
@ -9590,6 +9590,9 @@ static bool should_reset_plane(struct drm_atomic_state *state,
|
|||
if (adev->ip_versions[DCE_HWIP][0] < IP_VERSION(3, 2, 0) && state->allow_modeset)
|
||||
return true;
|
||||
|
||||
if (amdgpu_in_reset(adev) && state->allow_modeset)
|
||||
return true;
|
||||
|
||||
/* Exit early if we know that we're adding or removing the plane. */
|
||||
if (old_plane_state->crtc != new_plane_state->crtc)
|
||||
return true;
|
||||
|
|
|
@ -142,7 +142,7 @@ static const struct iio_chan_spec ad7768_channels[] = {
|
|||
.channel = 0,
|
||||
.scan_index = 0,
|
||||
.scan_type = {
|
||||
.sign = 'u',
|
||||
.sign = 's',
|
||||
.realbits = 24,
|
||||
.storagebits = 32,
|
||||
.shift = 8,
|
||||
|
@ -370,12 +370,11 @@ static int ad7768_read_raw(struct iio_dev *indio_dev,
|
|||
return ret;
|
||||
|
||||
ret = ad7768_scan_direct(indio_dev);
|
||||
if (ret >= 0)
|
||||
*val = ret;
|
||||
|
||||
iio_device_release_direct_mode(indio_dev);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
*val = sign_extend32(ret, chan->scan_type.realbits - 1);
|
||||
|
||||
return IIO_VAL_INT;
|
||||
|
||||
|
|
|
@ -55,6 +55,7 @@ static int qibfs_mknod(struct inode *dir, struct dentry *dentry,
|
|||
struct inode *inode = new_inode(dir->i_sb);
|
||||
|
||||
if (!inode) {
|
||||
dput(dentry);
|
||||
error = -EPERM;
|
||||
goto bail;
|
||||
}
|
||||
|
|
|
@ -3619,7 +3619,7 @@ static int amd_ir_set_vcpu_affinity(struct irq_data *data, void *vcpu_info)
|
|||
* we should not modify the IRTE
|
||||
*/
|
||||
if (!dev_data || !dev_data->use_vapic)
|
||||
return 0;
|
||||
return -EINVAL;
|
||||
|
||||
ir_data->cfg = irqd_cfg(data);
|
||||
pi_data->ir_data = ir_data;
|
||||
|
|
|
@ -454,7 +454,7 @@ static int __init gicv2m_of_init(struct fwnode_handle *parent_handle,
|
|||
#ifdef CONFIG_ACPI
|
||||
static int acpi_num_msi;
|
||||
|
||||
static __init struct fwnode_handle *gicv2m_get_fwnode(struct device *dev)
|
||||
static struct fwnode_handle *gicv2m_get_fwnode(struct device *dev)
|
||||
{
|
||||
struct v2m_data *data;
|
||||
|
||||
|
|
|
@ -313,6 +313,10 @@ static irqreturn_t pcc_mbox_irq(int irq, void *p)
|
|||
int ret;
|
||||
|
||||
pchan = chan->con_priv;
|
||||
|
||||
if (pcc_chan_reg_read_modify_write(&pchan->plat_irq_ack))
|
||||
return IRQ_NONE;
|
||||
|
||||
if (pchan->type == ACPI_PCCT_TYPE_EXT_PCC_MASTER_SUBSPACE &&
|
||||
!pchan->chan_in_use)
|
||||
return IRQ_NONE;
|
||||
|
@ -330,13 +334,16 @@ static irqreturn_t pcc_mbox_irq(int irq, void *p)
|
|||
return IRQ_NONE;
|
||||
}
|
||||
|
||||
if (pcc_chan_reg_read_modify_write(&pchan->plat_irq_ack))
|
||||
return IRQ_NONE;
|
||||
|
||||
/*
|
||||
* Clear this flag after updating interrupt ack register and just
|
||||
* before mbox_chan_received_data() which might call pcc_send_data()
|
||||
* where the flag is set again to start new transfer. This is
|
||||
* required to avoid any possible race in updatation of this flag.
|
||||
*/
|
||||
pchan->chan_in_use = false;
|
||||
mbox_chan_received_data(chan, NULL);
|
||||
|
||||
check_and_ack(pchan, chan);
|
||||
pchan->chan_in_use = false;
|
||||
|
||||
return IRQ_HANDLED;
|
||||
}
|
||||
|
|
|
@ -101,7 +101,7 @@ static int chameleon_parse_gdd(struct mcb_bus *bus,
|
|||
|
||||
ret = mcb_device_register(bus, mdev);
|
||||
if (ret < 0)
|
||||
goto err;
|
||||
return ret;
|
||||
|
||||
return 0;
|
||||
|
||||
|
|
|
@ -2061,14 +2061,9 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
|
|||
if (!rdev_set_badblocks(rdev, sect, s, 0))
|
||||
abort = 1;
|
||||
}
|
||||
if (abort) {
|
||||
conf->recovery_disabled =
|
||||
mddev->recovery_disabled;
|
||||
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
|
||||
md_done_sync(mddev, r1_bio->sectors, 0);
|
||||
put_buf(r1_bio);
|
||||
if (abort)
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Try next page */
|
||||
sectors -= s;
|
||||
sect += s;
|
||||
|
@ -2207,10 +2202,21 @@ static void sync_request_write(struct mddev *mddev, struct r1bio *r1_bio)
|
|||
int disks = conf->raid_disks * 2;
|
||||
struct bio *wbio;
|
||||
|
||||
if (!test_bit(R1BIO_Uptodate, &r1_bio->state))
|
||||
/* ouch - failed to read all of that. */
|
||||
if (!fix_sync_read_error(r1_bio))
|
||||
if (!test_bit(R1BIO_Uptodate, &r1_bio->state)) {
|
||||
/*
|
||||
* ouch - failed to read all of that.
|
||||
* No need to fix read error for check/repair
|
||||
* because all member disks are read.
|
||||
*/
|
||||
if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) ||
|
||||
!fix_sync_read_error(r1_bio)) {
|
||||
conf->recovery_disabled = mddev->recovery_disabled;
|
||||
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
|
||||
md_done_sync(mddev, r1_bio->sectors, 0);
|
||||
put_buf(r1_bio);
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
|
||||
process_checks(r1_bio);
|
||||
|
|
|
@ -59,6 +59,12 @@ static void vimc_streamer_pipeline_terminate(struct vimc_stream *stream)
|
|||
continue;
|
||||
|
||||
sd = media_entity_to_v4l2_subdev(ved->ent);
|
||||
/*
|
||||
* Do not call .s_stream() to stop an already
|
||||
* stopped/unstarted subdev.
|
||||
*/
|
||||
if (!v4l2_subdev_is_streaming(sd))
|
||||
continue;
|
||||
v4l2_subdev_call(sd, video, s_stream, 0);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -363,12 +363,8 @@ static int call_s_stream(struct v4l2_subdev *sd, int enable)
|
|||
* The .s_stream() operation must never be called to start or stop an
|
||||
* already started or stopped subdev. Catch offenders but don't return
|
||||
* an error yet to avoid regressions.
|
||||
*
|
||||
* As .s_stream() is mutually exclusive with the .enable_streams() and
|
||||
* .disable_streams() operation, we can use the enabled_streams field
|
||||
* to store the subdev streaming state.
|
||||
*/
|
||||
if (WARN_ON(!!sd->enabled_streams == !!enable))
|
||||
if (WARN_ON(sd->s_stream_enabled == !!enable))
|
||||
return 0;
|
||||
|
||||
ret = sd->ops->video->s_stream(sd, enable);
|
||||
|
@ -379,7 +375,7 @@ static int call_s_stream(struct v4l2_subdev *sd, int enable)
|
|||
}
|
||||
|
||||
if (!ret) {
|
||||
sd->enabled_streams = enable ? BIT(0) : 0;
|
||||
sd->s_stream_enabled = enable;
|
||||
|
||||
#if IS_REACHABLE(CONFIG_LEDS_CLASS)
|
||||
if (!IS_ERR_OR_NULL(sd->privacy_led)) {
|
||||
|
@ -1929,37 +1925,43 @@ static int v4l2_subdev_enable_streams_fallback(struct v4l2_subdev *sd, u32 pad,
|
|||
u64 streams_mask)
|
||||
{
|
||||
struct device *dev = sd->entity.graph_obj.mdev->dev;
|
||||
unsigned int i;
|
||||
int ret;
|
||||
|
||||
/*
|
||||
* The subdev doesn't implement pad-based stream enable, fall back
|
||||
* on the .s_stream() operation. This can only be done for subdevs that
|
||||
* have a single source pad, as sd->enabled_streams is global to the
|
||||
* subdev.
|
||||
* to the .s_stream() operation.
|
||||
*/
|
||||
if (!(sd->entity.pads[pad].flags & MEDIA_PAD_FL_SOURCE))
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
for (i = 0; i < sd->entity.num_pads; ++i) {
|
||||
if (i != pad && sd->entity.pads[i].flags & MEDIA_PAD_FL_SOURCE)
|
||||
return -EOPNOTSUPP;
|
||||
}
|
||||
/*
|
||||
* .s_stream() means there is no streams support, so the only allowed
|
||||
* stream is the implicit stream 0.
|
||||
*/
|
||||
if (streams_mask != BIT_ULL(0))
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
if (sd->enabled_streams & streams_mask) {
|
||||
dev_dbg(dev, "set of streams %#llx already enabled on %s:%u\n",
|
||||
streams_mask, sd->entity.name, pad);
|
||||
/*
|
||||
* We use a 64-bit bitmask for tracking enabled pads, so only subdevices
|
||||
* with 64 pads or less can be supported.
|
||||
*/
|
||||
if (pad >= sizeof(sd->enabled_pads) * BITS_PER_BYTE)
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
if (sd->enabled_pads & BIT_ULL(pad)) {
|
||||
dev_dbg(dev, "pad %u already enabled on %s\n",
|
||||
pad, sd->entity.name);
|
||||
return -EALREADY;
|
||||
}
|
||||
|
||||
/* Start streaming when the first streams are enabled. */
|
||||
if (!sd->enabled_streams) {
|
||||
/* Start streaming when the first pad is enabled. */
|
||||
if (!sd->enabled_pads) {
|
||||
ret = v4l2_subdev_call(sd, video, s_stream, 1);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
|
||||
sd->enabled_streams |= streams_mask;
|
||||
sd->enabled_pads |= BIT_ULL(pad);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -2046,37 +2048,43 @@ static int v4l2_subdev_disable_streams_fallback(struct v4l2_subdev *sd, u32 pad,
|
|||
u64 streams_mask)
|
||||
{
|
||||
struct device *dev = sd->entity.graph_obj.mdev->dev;
|
||||
unsigned int i;
|
||||
int ret;
|
||||
|
||||
/*
|
||||
* If the subdev doesn't implement pad-based stream enable, fall back
|
||||
* on the .s_stream() operation. This can only be done for subdevs that
|
||||
* have a single source pad, as sd->enabled_streams is global to the
|
||||
* subdev.
|
||||
* If the subdev doesn't implement pad-based stream enable, fall back
|
||||
* to the .s_stream() operation.
|
||||
*/
|
||||
if (!(sd->entity.pads[pad].flags & MEDIA_PAD_FL_SOURCE))
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
for (i = 0; i < sd->entity.num_pads; ++i) {
|
||||
if (i != pad && sd->entity.pads[i].flags & MEDIA_PAD_FL_SOURCE)
|
||||
return -EOPNOTSUPP;
|
||||
}
|
||||
/*
|
||||
* .s_stream() means there is no streams support, so the only allowed
|
||||
* stream is the implicit stream 0.
|
||||
*/
|
||||
if (streams_mask != BIT_ULL(0))
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
if ((sd->enabled_streams & streams_mask) != streams_mask) {
|
||||
dev_dbg(dev, "set of streams %#llx already disabled on %s:%u\n",
|
||||
streams_mask, sd->entity.name, pad);
|
||||
/*
|
||||
* We use a 64-bit bitmask for tracking enabled pads, so only subdevices
|
||||
* with 64 pads or less can be supported.
|
||||
*/
|
||||
if (pad >= sizeof(sd->enabled_pads) * BITS_PER_BYTE)
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
if (!(sd->enabled_pads & BIT_ULL(pad))) {
|
||||
dev_dbg(dev, "pad %u already disabled on %s\n",
|
||||
pad, sd->entity.name);
|
||||
return -EALREADY;
|
||||
}
|
||||
|
||||
/* Stop streaming when the last streams are disabled. */
|
||||
if (!(sd->enabled_streams & ~streams_mask)) {
|
||||
if (!(sd->enabled_pads & ~BIT_ULL(pad))) {
|
||||
ret = v4l2_subdev_call(sd, video, s_stream, 0);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
|
||||
sd->enabled_streams &= ~streams_mask;
|
||||
sd->enabled_pads &= ~BIT_ULL(pad);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -2232,6 +2240,31 @@ void v4l2_subdev_notify_event(struct v4l2_subdev *sd,
|
|||
}
|
||||
EXPORT_SYMBOL_GPL(v4l2_subdev_notify_event);
|
||||
|
||||
bool v4l2_subdev_is_streaming(struct v4l2_subdev *sd)
|
||||
{
|
||||
struct v4l2_subdev_state *state;
|
||||
|
||||
if (!v4l2_subdev_has_op(sd, pad, enable_streams))
|
||||
return sd->s_stream_enabled;
|
||||
|
||||
if (!(sd->flags & V4L2_SUBDEV_FL_STREAMS))
|
||||
return !!sd->enabled_pads;
|
||||
|
||||
state = v4l2_subdev_get_locked_active_state(sd);
|
||||
|
||||
for (unsigned int i = 0; i < state->stream_configs.num_configs; ++i) {
|
||||
const struct v4l2_subdev_stream_config *cfg;
|
||||
|
||||
cfg = &state->stream_configs.configs[i];
|
||||
|
||||
if (cfg->enabled)
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(v4l2_subdev_is_streaming);
|
||||
|
||||
int v4l2_subdev_get_privacy_led(struct v4l2_subdev *sd)
|
||||
{
|
||||
#if IS_REACHABLE(CONFIG_LEDS_CLASS)
|
||||
|
|
|
@ -28,6 +28,13 @@ static const unsigned long rodata = 0xAA55AA55;
|
|||
/* This is marked __ro_after_init, so it should ultimately be .rodata. */
|
||||
static unsigned long ro_after_init __ro_after_init = 0x55AA5500;
|
||||
|
||||
/*
|
||||
* This is a pointer to do_nothing() which is initialized at runtime rather
|
||||
* than build time to avoid objtool IBT validation warnings caused by an
|
||||
* inlined unrolled memcpy() in execute_location().
|
||||
*/
|
||||
static void __ro_after_init *do_nothing_ptr;
|
||||
|
||||
/*
|
||||
* This just returns to the caller. It is designed to be copied into
|
||||
* non-executable memory regions.
|
||||
|
@ -65,13 +72,12 @@ static noinline __nocfi void execute_location(void *dst, bool write)
|
|||
{
|
||||
void (*func)(void);
|
||||
func_desc_t fdesc;
|
||||
void *do_nothing_text = dereference_function_descriptor(do_nothing);
|
||||
|
||||
pr_info("attempting ok execution at %px\n", do_nothing_text);
|
||||
pr_info("attempting ok execution at %px\n", do_nothing_ptr);
|
||||
do_nothing();
|
||||
|
||||
if (write == CODE_WRITE) {
|
||||
memcpy(dst, do_nothing_text, EXEC_SIZE);
|
||||
memcpy(dst, do_nothing_ptr, EXEC_SIZE);
|
||||
flush_icache_range((unsigned long)dst,
|
||||
(unsigned long)dst + EXEC_SIZE);
|
||||
}
|
||||
|
@ -267,6 +273,8 @@ static void lkdtm_ACCESS_NULL(void)
|
|||
|
||||
void __init lkdtm_perms_init(void)
|
||||
{
|
||||
do_nothing_ptr = dereference_function_descriptor(do_nothing);
|
||||
|
||||
/* Make sure we can write to __ro_after_init values during __init */
|
||||
ro_after_init |= 0xAA;
|
||||
}
|
||||
|
|
|
@ -37,6 +37,7 @@
|
|||
struct pci1xxxx_gpio {
|
||||
struct auxiliary_device *aux_dev;
|
||||
void __iomem *reg_base;
|
||||
raw_spinlock_t wa_lock;
|
||||
struct gpio_chip gpio;
|
||||
spinlock_t lock;
|
||||
int irq_base;
|
||||
|
@ -164,7 +165,7 @@ static void pci1xxxx_gpio_irq_ack(struct irq_data *data)
|
|||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&priv->lock, flags);
|
||||
pci1xxx_assign_bit(priv->reg_base, INTR_STAT_OFFSET(gpio), (gpio % 32), true);
|
||||
writel(BIT(gpio % 32), priv->reg_base + INTR_STAT_OFFSET(gpio));
|
||||
spin_unlock_irqrestore(&priv->lock, flags);
|
||||
}
|
||||
|
||||
|
@ -254,6 +255,7 @@ static irqreturn_t pci1xxxx_gpio_irq_handler(int irq, void *dev_id)
|
|||
struct pci1xxxx_gpio *priv = dev_id;
|
||||
struct gpio_chip *gc = &priv->gpio;
|
||||
unsigned long int_status = 0;
|
||||
unsigned long wa_flags;
|
||||
unsigned long flags;
|
||||
u8 pincount;
|
||||
int bit;
|
||||
|
@ -277,7 +279,9 @@ static irqreturn_t pci1xxxx_gpio_irq_handler(int irq, void *dev_id)
|
|||
writel(BIT(bit), priv->reg_base + INTR_STATUS_OFFSET(gpiobank));
|
||||
spin_unlock_irqrestore(&priv->lock, flags);
|
||||
irq = irq_find_mapping(gc->irq.domain, (bit + (gpiobank * 32)));
|
||||
handle_nested_irq(irq);
|
||||
raw_spin_lock_irqsave(&priv->wa_lock, wa_flags);
|
||||
generic_handle_irq(irq);
|
||||
raw_spin_unlock_irqrestore(&priv->wa_lock, wa_flags);
|
||||
}
|
||||
}
|
||||
spin_lock_irqsave(&priv->lock, flags);
|
||||
|
|
|
@ -117,6 +117,7 @@
|
|||
|
||||
#define MEI_DEV_ID_LNL_M 0xA870 /* Lunar Lake Point M */
|
||||
|
||||
#define MEI_DEV_ID_PTL_H 0xE370 /* Panther Lake H */
|
||||
#define MEI_DEV_ID_PTL_P 0xE470 /* Panther Lake P */
|
||||
|
||||
/*
|
||||
|
|
|
@ -124,6 +124,7 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
|
|||
|
||||
{MEI_PCI_DEVICE(MEI_DEV_ID_LNL_M, MEI_ME_PCH15_CFG)},
|
||||
|
||||
{MEI_PCI_DEVICE(MEI_DEV_ID_PTL_H, MEI_ME_PCH15_CFG)},
|
||||
{MEI_PCI_DEVICE(MEI_DEV_ID_PTL_P, MEI_ME_PCH15_CFG)},
|
||||
|
||||
/* required last entry */
|
||||
|
|
|
@ -1866,7 +1866,7 @@ static int sdhci_msm_ice_init(struct sdhci_msm_host *msm_host,
|
|||
if (!(cqhci_readl(cq_host, CQHCI_CAP) & CQHCI_CAP_CS))
|
||||
return 0;
|
||||
|
||||
ice = of_qcom_ice_get(dev);
|
||||
ice = devm_of_qcom_ice_get(dev);
|
||||
if (ice == ERR_PTR(-EOPNOTSUPP)) {
|
||||
dev_warn(dev, "Disabling inline encryption support\n");
|
||||
ice = NULL;
|
||||
|
|
|
@ -2596,6 +2596,9 @@ mt7531_setup_common(struct dsa_switch *ds)
|
|||
struct mt7530_priv *priv = ds->priv;
|
||||
int ret, i;
|
||||
|
||||
ds->assisted_learning_on_cpu_port = true;
|
||||
ds->mtu_enforcement_ingress = true;
|
||||
|
||||
mt753x_trap_frames(priv);
|
||||
|
||||
/* Enable and reset MIB counters */
|
||||
|
@ -2735,9 +2738,6 @@ mt7531_setup(struct dsa_switch *ds)
|
|||
|
||||
mt7531_setup_common(ds);
|
||||
|
||||
ds->assisted_learning_on_cpu_port = true;
|
||||
ds->mtu_enforcement_ingress = true;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
|
|
@ -5047,6 +5047,7 @@ static const struct mv88e6xxx_ops mv88e6320_ops = {
|
|||
.port_set_rgmii_delay = mv88e6320_port_set_rgmii_delay,
|
||||
.port_set_speed_duplex = mv88e6185_port_set_speed_duplex,
|
||||
.port_tag_remap = mv88e6095_port_tag_remap,
|
||||
.port_set_policy = mv88e6352_port_set_policy,
|
||||
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
|
||||
.port_set_ucast_flood = mv88e6352_port_set_ucast_flood,
|
||||
.port_set_mcast_flood = mv88e6352_port_set_mcast_flood,
|
||||
|
@ -5071,8 +5072,10 @@ static const struct mv88e6xxx_ops mv88e6320_ops = {
|
|||
.hardware_reset_pre = mv88e6xxx_g2_eeprom_wait,
|
||||
.hardware_reset_post = mv88e6xxx_g2_eeprom_wait,
|
||||
.reset = mv88e6352_g1_reset,
|
||||
.vtu_getnext = mv88e6185_g1_vtu_getnext,
|
||||
.vtu_loadpurge = mv88e6185_g1_vtu_loadpurge,
|
||||
.vtu_getnext = mv88e6352_g1_vtu_getnext,
|
||||
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
|
||||
.stu_getnext = mv88e6352_g1_stu_getnext,
|
||||
.stu_loadpurge = mv88e6352_g1_stu_loadpurge,
|
||||
.gpio_ops = &mv88e6352_gpio_ops,
|
||||
.avb_ops = &mv88e6352_avb_ops,
|
||||
.ptp_ops = &mv88e6352_ptp_ops,
|
||||
|
@ -5097,6 +5100,7 @@ static const struct mv88e6xxx_ops mv88e6321_ops = {
|
|||
.port_set_rgmii_delay = mv88e6320_port_set_rgmii_delay,
|
||||
.port_set_speed_duplex = mv88e6185_port_set_speed_duplex,
|
||||
.port_tag_remap = mv88e6095_port_tag_remap,
|
||||
.port_set_policy = mv88e6352_port_set_policy,
|
||||
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
|
||||
.port_set_ucast_flood = mv88e6352_port_set_ucast_flood,
|
||||
.port_set_mcast_flood = mv88e6352_port_set_mcast_flood,
|
||||
|
@ -5120,8 +5124,10 @@ static const struct mv88e6xxx_ops mv88e6321_ops = {
|
|||
.hardware_reset_pre = mv88e6xxx_g2_eeprom_wait,
|
||||
.hardware_reset_post = mv88e6xxx_g2_eeprom_wait,
|
||||
.reset = mv88e6352_g1_reset,
|
||||
.vtu_getnext = mv88e6185_g1_vtu_getnext,
|
||||
.vtu_loadpurge = mv88e6185_g1_vtu_loadpurge,
|
||||
.vtu_getnext = mv88e6352_g1_vtu_getnext,
|
||||
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
|
||||
.stu_getnext = mv88e6352_g1_stu_getnext,
|
||||
.stu_loadpurge = mv88e6352_g1_stu_loadpurge,
|
||||
.gpio_ops = &mv88e6352_gpio_ops,
|
||||
.avb_ops = &mv88e6352_avb_ops,
|
||||
.ptp_ops = &mv88e6352_ptp_ops,
|
||||
|
@ -5713,7 +5719,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
|
|||
.global1_addr = 0x1b,
|
||||
.global2_addr = 0x1c,
|
||||
.age_time_coeff = 3750,
|
||||
.atu_move_port_mask = 0x1f,
|
||||
.atu_move_port_mask = 0xf,
|
||||
.g1_irqs = 9,
|
||||
.g2_irqs = 10,
|
||||
.pvt = true,
|
||||
|
@ -6114,9 +6120,11 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
|
|||
.num_databases = 4096,
|
||||
.num_macs = 8192,
|
||||
.num_ports = 7,
|
||||
.num_internal_phys = 5,
|
||||
.num_internal_phys = 2,
|
||||
.internal_phys_offset = 3,
|
||||
.num_gpio = 15,
|
||||
.max_vid = 4095,
|
||||
.max_sid = 63,
|
||||
.port_base_addr = 0x10,
|
||||
.phy_base_addr = 0x0,
|
||||
.global1_addr = 0x1b,
|
||||
|
@ -6139,9 +6147,11 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
|
|||
.num_databases = 4096,
|
||||
.num_macs = 8192,
|
||||
.num_ports = 7,
|
||||
.num_internal_phys = 5,
|
||||
.num_internal_phys = 2,
|
||||
.internal_phys_offset = 3,
|
||||
.num_gpio = 15,
|
||||
.max_vid = 4095,
|
||||
.max_sid = 63,
|
||||
.port_base_addr = 0x10,
|
||||
.phy_base_addr = 0x0,
|
||||
.global1_addr = 0x1b,
|
||||
|
@ -6150,6 +6160,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
|
|||
.g1_irqs = 8,
|
||||
.g2_irqs = 10,
|
||||
.atu_move_port_mask = 0xf,
|
||||
.pvt = true,
|
||||
.multi_chip = true,
|
||||
.edsa_support = MV88E6XXX_EDSA_SUPPORTED,
|
||||
.ptp_support = true,
|
||||
|
@ -6172,7 +6183,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
|
|||
.global1_addr = 0x1b,
|
||||
.global2_addr = 0x1c,
|
||||
.age_time_coeff = 3750,
|
||||
.atu_move_port_mask = 0x1f,
|
||||
.atu_move_port_mask = 0xf,
|
||||
.g1_irqs = 9,
|
||||
.g2_irqs = 10,
|
||||
.pvt = true,
|
||||
|
|
|
@ -5,11 +5,6 @@
|
|||
|
||||
#include "core.h"
|
||||
|
||||
struct pdsc_wait_context {
|
||||
struct pdsc_qcq *qcq;
|
||||
struct completion wait_completion;
|
||||
};
|
||||
|
||||
static int pdsc_process_notifyq(struct pdsc_qcq *qcq)
|
||||
{
|
||||
union pds_core_notifyq_comp *comp;
|
||||
|
@ -110,10 +105,10 @@ void pdsc_process_adminq(struct pdsc_qcq *qcq)
|
|||
q_info = &q->info[q->tail_idx];
|
||||
q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);
|
||||
|
||||
/* Copy out the completion data */
|
||||
memcpy(q_info->dest, comp, sizeof(*comp));
|
||||
|
||||
complete_all(&q_info->wc->wait_completion);
|
||||
if (!completion_done(&q_info->completion)) {
|
||||
memcpy(q_info->dest, comp, sizeof(*comp));
|
||||
complete(&q_info->completion);
|
||||
}
|
||||
|
||||
if (cq->tail_idx == cq->num_descs - 1)
|
||||
cq->done_color = !cq->done_color;
|
||||
|
@ -166,8 +161,7 @@ irqreturn_t pdsc_adminq_isr(int irq, void *data)
|
|||
static int __pdsc_adminq_post(struct pdsc *pdsc,
|
||||
struct pdsc_qcq *qcq,
|
||||
union pds_core_adminq_cmd *cmd,
|
||||
union pds_core_adminq_comp *comp,
|
||||
struct pdsc_wait_context *wc)
|
||||
union pds_core_adminq_comp *comp)
|
||||
{
|
||||
struct pdsc_queue *q = &qcq->q;
|
||||
struct pdsc_q_info *q_info;
|
||||
|
@ -209,9 +203,9 @@ static int __pdsc_adminq_post(struct pdsc *pdsc,
|
|||
/* Post the request */
|
||||
index = q->head_idx;
|
||||
q_info = &q->info[index];
|
||||
q_info->wc = wc;
|
||||
q_info->dest = comp;
|
||||
memcpy(q_info->desc, cmd, sizeof(*cmd));
|
||||
reinit_completion(&q_info->completion);
|
||||
|
||||
dev_dbg(pdsc->dev, "head_idx %d tail_idx %d\n",
|
||||
q->head_idx, q->tail_idx);
|
||||
|
@ -235,16 +229,13 @@ int pdsc_adminq_post(struct pdsc *pdsc,
|
|||
union pds_core_adminq_comp *comp,
|
||||
bool fast_poll)
|
||||
{
|
||||
struct pdsc_wait_context wc = {
|
||||
.wait_completion =
|
||||
COMPLETION_INITIALIZER_ONSTACK(wc.wait_completion),
|
||||
};
|
||||
unsigned long poll_interval = 1;
|
||||
unsigned long poll_jiffies;
|
||||
unsigned long time_limit;
|
||||
unsigned long time_start;
|
||||
unsigned long time_done;
|
||||
unsigned long remaining;
|
||||
struct completion *wc;
|
||||
int err = 0;
|
||||
int index;
|
||||
|
||||
|
@ -254,20 +245,19 @@ int pdsc_adminq_post(struct pdsc *pdsc,
|
|||
return -ENXIO;
|
||||
}
|
||||
|
||||
wc.qcq = &pdsc->adminqcq;
|
||||
index = __pdsc_adminq_post(pdsc, &pdsc->adminqcq, cmd, comp, &wc);
|
||||
index = __pdsc_adminq_post(pdsc, &pdsc->adminqcq, cmd, comp);
|
||||
if (index < 0) {
|
||||
err = index;
|
||||
goto err_out;
|
||||
}
|
||||
|
||||
wc = &pdsc->adminqcq.q.info[index].completion;
|
||||
time_start = jiffies;
|
||||
time_limit = time_start + HZ * pdsc->devcmd_timeout;
|
||||
do {
|
||||
/* Timeslice the actual wait to catch IO errors etc early */
|
||||
poll_jiffies = msecs_to_jiffies(poll_interval);
|
||||
remaining = wait_for_completion_timeout(&wc.wait_completion,
|
||||
poll_jiffies);
|
||||
remaining = wait_for_completion_timeout(wc, poll_jiffies);
|
||||
if (remaining)
|
||||
break;
|
||||
|
||||
|
@ -296,9 +286,11 @@ int pdsc_adminq_post(struct pdsc *pdsc,
|
|||
dev_dbg(pdsc->dev, "%s: elapsed %d msecs\n",
|
||||
__func__, jiffies_to_msecs(time_done - time_start));
|
||||
|
||||
/* Check the results */
|
||||
if (time_after_eq(time_done, time_limit))
|
||||
/* Check the results and clear an un-completed timeout */
|
||||
if (time_after_eq(time_done, time_limit) && !completion_done(wc)) {
|
||||
err = -ETIMEDOUT;
|
||||
complete(wc);
|
||||
}
|
||||
|
||||
dev_dbg(pdsc->dev, "read admin queue completion idx %d:\n", index);
|
||||
dynamic_hex_dump("comp ", DUMP_PREFIX_OFFSET, 16, 1,
|
||||
|
|
|
@ -107,9 +107,6 @@ int pds_client_adminq_cmd(struct pds_auxiliary_dev *padev,
|
|||
dev_dbg(pf->dev, "%s: %s opcode %d\n",
|
||||
__func__, dev_name(&padev->aux_dev.dev), req->opcode);
|
||||
|
||||
if (pf->state)
|
||||
return -ENXIO;
|
||||
|
||||
/* Wrap the client's request */
|
||||
cmd.client_request.opcode = PDS_AQ_CMD_CLIENT_CMD;
|
||||
cmd.client_request.client_id = cpu_to_le16(padev->client_id);
|
||||
|
|
|
@ -169,8 +169,10 @@ static void pdsc_q_map(struct pdsc_queue *q, void *base, dma_addr_t base_pa)
|
|||
q->base = base;
|
||||
q->base_pa = base_pa;
|
||||
|
||||
for (i = 0, cur = q->info; i < q->num_descs; i++, cur++)
|
||||
for (i = 0, cur = q->info; i < q->num_descs; i++, cur++) {
|
||||
cur->desc = base + (i * q->desc_size);
|
||||
init_completion(&cur->completion);
|
||||
}
|
||||
}
|
||||
|
||||
static void pdsc_cq_map(struct pdsc_cq *cq, void *base, dma_addr_t base_pa)
|
||||
|
|
|
@ -96,7 +96,7 @@ struct pdsc_q_info {
|
|||
unsigned int bytes;
|
||||
unsigned int nbufs;
|
||||
struct pdsc_buf_info bufs[PDS_CORE_MAX_FRAGS];
|
||||
struct pdsc_wait_context *wc;
|
||||
struct completion completion;
|
||||
void *dest;
|
||||
};
|
||||
|
||||
|
|
|
@ -101,7 +101,7 @@ int pdsc_dl_info_get(struct devlink *dl, struct devlink_info_req *req,
|
|||
.fw_control.opcode = PDS_CORE_CMD_FW_CONTROL,
|
||||
.fw_control.oper = PDS_CORE_FW_GET_LIST,
|
||||
};
|
||||
struct pds_core_fw_list_info fw_list;
|
||||
struct pds_core_fw_list_info fw_list = {};
|
||||
struct pdsc *pdsc = devlink_priv(dl);
|
||||
union pds_core_dev_comp comp;
|
||||
char buf[32];
|
||||
|
@ -114,8 +114,6 @@ int pdsc_dl_info_get(struct devlink *dl, struct devlink_info_req *req,
|
|||
if (!err)
|
||||
memcpy_fromio(&fw_list, pdsc->cmd_regs->data, sizeof(fw_list));
|
||||
mutex_unlock(&pdsc->devcmd_lock);
|
||||
if (err && err != -EIO)
|
||||
return err;
|
||||
|
||||
listlen = min(fw_list.num_fw_slots, ARRAY_SIZE(fw_list.fw_names));
|
||||
for (i = 0; i < listlen; i++) {
|
||||
|
|
|
@ -3949,11 +3949,27 @@ static int mtk_hw_init(struct mtk_eth *eth, bool reset)
|
|||
mtk_w32(eth, 0x21021000, MTK_FE_INT_GRP);
|
||||
|
||||
if (mtk_is_netsys_v3_or_greater(eth)) {
|
||||
/* PSE should not drop port1, port8 and port9 packets */
|
||||
mtk_w32(eth, 0x00000302, PSE_DROP_CFG);
|
||||
/* PSE dummy page mechanism */
|
||||
mtk_w32(eth, PSE_DUMMY_WORK_GDM(1) | PSE_DUMMY_WORK_GDM(2) |
|
||||
PSE_DUMMY_WORK_GDM(3) | DUMMY_PAGE_THR, PSE_DUMY_REQ);
|
||||
|
||||
/* PSE free buffer drop threshold */
|
||||
mtk_w32(eth, 0x00600009, PSE_IQ_REV(8));
|
||||
|
||||
/* PSE should not drop port8, port9 and port13 packets from
|
||||
* WDMA Tx
|
||||
*/
|
||||
mtk_w32(eth, 0x00002300, PSE_DROP_CFG);
|
||||
|
||||
/* PSE should drop packets to port8, port9 and port13 on WDMA Rx
|
||||
* ring full
|
||||
*/
|
||||
mtk_w32(eth, 0x00002300, PSE_PPE_DROP(0));
|
||||
mtk_w32(eth, 0x00002300, PSE_PPE_DROP(1));
|
||||
mtk_w32(eth, 0x00002300, PSE_PPE_DROP(2));
|
||||
|
||||
/* GDM and CDM Threshold */
|
||||
mtk_w32(eth, 0x00000707, MTK_CDMW0_THRES);
|
||||
mtk_w32(eth, 0x08000707, MTK_CDMW0_THRES);
|
||||
mtk_w32(eth, 0x00000077, MTK_CDMW1_THRES);
|
||||
|
||||
/* Disable GDM1 RX CRC stripping */
|
||||
|
@ -3970,7 +3986,7 @@ static int mtk_hw_init(struct mtk_eth *eth, bool reset)
|
|||
mtk_w32(eth, 0x00000300, PSE_DROP_CFG);
|
||||
|
||||
/* PSE should drop packets to port 8/9 on WDMA Rx ring full */
|
||||
mtk_w32(eth, 0x00000300, PSE_PPE0_DROP);
|
||||
mtk_w32(eth, 0x00000300, PSE_PPE_DROP(0));
|
||||
|
||||
/* PSE Free Queue Flow Control */
|
||||
mtk_w32(eth, 0x01fa01f4, PSE_FQFC_CFG2);
|
||||
|
|
|
@ -149,7 +149,15 @@
|
|||
#define PSE_FQFC_CFG1 0x100
|
||||
#define PSE_FQFC_CFG2 0x104
|
||||
#define PSE_DROP_CFG 0x108
|
||||
#define PSE_PPE0_DROP 0x110
|
||||
#define PSE_PPE_DROP(x) (0x110 + ((x) * 0x4))
|
||||
|
||||
/* PSE Last FreeQ Page Request Control */
|
||||
#define PSE_DUMY_REQ 0x10C
|
||||
/* PSE_DUMY_REQ is not a typo but actually called like that also in
|
||||
* MediaTek's datasheet
|
||||
*/
|
||||
#define PSE_DUMMY_WORK_GDM(x) BIT(16 + (x))
|
||||
#define DUMMY_PAGE_THR 0x1
|
||||
|
||||
/* PSE Input Queue Reservation Register*/
|
||||
#define PSE_IQ_REV(x) (0x140 + (((x) - 1) << 2))
|
||||
|
|
|
@ -31,47 +31,6 @@ static int lan88xx_write_page(struct phy_device *phydev, int page)
|
|||
return __phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS, page);
|
||||
}
|
||||
|
||||
static int lan88xx_phy_config_intr(struct phy_device *phydev)
|
||||
{
|
||||
int rc;
|
||||
|
||||
if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
|
||||
/* unmask all source and clear them before enable */
|
||||
rc = phy_write(phydev, LAN88XX_INT_MASK, 0x7FFF);
|
||||
rc = phy_read(phydev, LAN88XX_INT_STS);
|
||||
rc = phy_write(phydev, LAN88XX_INT_MASK,
|
||||
LAN88XX_INT_MASK_MDINTPIN_EN_ |
|
||||
LAN88XX_INT_MASK_LINK_CHANGE_);
|
||||
} else {
|
||||
rc = phy_write(phydev, LAN88XX_INT_MASK, 0);
|
||||
if (rc)
|
||||
return rc;
|
||||
|
||||
/* Ack interrupts after they have been disabled */
|
||||
rc = phy_read(phydev, LAN88XX_INT_STS);
|
||||
}
|
||||
|
||||
return rc < 0 ? rc : 0;
|
||||
}
|
||||
|
||||
static irqreturn_t lan88xx_handle_interrupt(struct phy_device *phydev)
|
||||
{
|
||||
int irq_status;
|
||||
|
||||
irq_status = phy_read(phydev, LAN88XX_INT_STS);
|
||||
if (irq_status < 0) {
|
||||
phy_error(phydev);
|
||||
return IRQ_NONE;
|
||||
}
|
||||
|
||||
if (!(irq_status & LAN88XX_INT_STS_LINK_CHANGE_))
|
||||
return IRQ_NONE;
|
||||
|
||||
phy_trigger_machine(phydev);
|
||||
|
||||
return IRQ_HANDLED;
|
||||
}
|
||||
|
||||
static int lan88xx_suspend(struct phy_device *phydev)
|
||||
{
|
||||
struct lan88xx_priv *priv = phydev->priv;
|
||||
|
@ -392,8 +351,9 @@ static struct phy_driver microchip_phy_driver[] = {
|
|||
.config_aneg = lan88xx_config_aneg,
|
||||
.link_change_notify = lan88xx_link_change_notify,
|
||||
|
||||
.config_intr = lan88xx_phy_config_intr,
|
||||
.handle_interrupt = lan88xx_handle_interrupt,
|
||||
/* Interrupt handling is broken, do not define related
|
||||
* functions to force polling.
|
||||
*/
|
||||
|
||||
.suspend = lan88xx_suspend,
|
||||
.resume = genphy_resume,
|
||||
|
|
|
@ -91,9 +91,8 @@ int phy_led_triggers_register(struct phy_device *phy)
|
|||
if (!phy->phy_num_led_triggers)
|
||||
return 0;
|
||||
|
||||
phy->led_link_trigger = devm_kzalloc(&phy->mdio.dev,
|
||||
sizeof(*phy->led_link_trigger),
|
||||
GFP_KERNEL);
|
||||
phy->led_link_trigger = kzalloc(sizeof(*phy->led_link_trigger),
|
||||
GFP_KERNEL);
|
||||
if (!phy->led_link_trigger) {
|
||||
err = -ENOMEM;
|
||||
goto out_clear;
|
||||
|
@ -103,10 +102,9 @@ int phy_led_triggers_register(struct phy_device *phy)
|
|||
if (err)
|
||||
goto out_free_link;
|
||||
|
||||
phy->phy_led_triggers = devm_kcalloc(&phy->mdio.dev,
|
||||
phy->phy_num_led_triggers,
|
||||
sizeof(struct phy_led_trigger),
|
||||
GFP_KERNEL);
|
||||
phy->phy_led_triggers = kcalloc(phy->phy_num_led_triggers,
|
||||
sizeof(struct phy_led_trigger),
|
||||
GFP_KERNEL);
|
||||
if (!phy->phy_led_triggers) {
|
||||
err = -ENOMEM;
|
||||
goto out_unreg_link;
|
||||
|
@ -127,11 +125,11 @@ int phy_led_triggers_register(struct phy_device *phy)
|
|||
out_unreg:
|
||||
while (i--)
|
||||
phy_led_trigger_unregister(&phy->phy_led_triggers[i]);
|
||||
devm_kfree(&phy->mdio.dev, phy->phy_led_triggers);
|
||||
kfree(phy->phy_led_triggers);
|
||||
out_unreg_link:
|
||||
phy_led_trigger_unregister(phy->led_link_trigger);
|
||||
out_free_link:
|
||||
devm_kfree(&phy->mdio.dev, phy->led_link_trigger);
|
||||
kfree(phy->led_link_trigger);
|
||||
phy->led_link_trigger = NULL;
|
||||
out_clear:
|
||||
phy->phy_num_led_triggers = 0;
|
||||
|
@ -145,8 +143,13 @@ void phy_led_triggers_unregister(struct phy_device *phy)
|
|||
|
||||
for (i = 0; i < phy->phy_num_led_triggers; i++)
|
||||
phy_led_trigger_unregister(&phy->phy_led_triggers[i]);
|
||||
kfree(phy->phy_led_triggers);
|
||||
phy->phy_led_triggers = NULL;
|
||||
|
||||
if (phy->led_link_trigger)
|
||||
if (phy->led_link_trigger) {
|
||||
phy_led_trigger_unregister(phy->led_link_trigger);
|
||||
kfree(phy->led_link_trigger);
|
||||
phy->led_link_trigger = NULL;
|
||||
}
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(phy_led_triggers_unregister);
|
||||
|
|
|
@ -397,7 +397,7 @@ vmxnet3_process_xdp(struct vmxnet3_adapter *adapter,
|
|||
|
||||
xdp_init_buff(&xdp, PAGE_SIZE, &rq->xdp_rxq);
|
||||
xdp_prepare_buff(&xdp, page_address(page), rq->page_pool->p.offset,
|
||||
rbi->len, false);
|
||||
rcd->len, false);
|
||||
xdp_buff_clear_frags_flag(&xdp);
|
||||
|
||||
xdp_prog = rcu_dereference(rq->adapter->xdp_bpf_prog);
|
||||
|
|
|
@ -985,20 +985,27 @@ static u32 xennet_run_xdp(struct netfront_queue *queue, struct page *pdata,
|
|||
act = bpf_prog_run_xdp(prog, xdp);
|
||||
switch (act) {
|
||||
case XDP_TX:
|
||||
get_page(pdata);
|
||||
xdpf = xdp_convert_buff_to_frame(xdp);
|
||||
err = xennet_xdp_xmit(queue->info->netdev, 1, &xdpf, 0);
|
||||
if (unlikely(!err))
|
||||
xdp_return_frame_rx_napi(xdpf);
|
||||
else if (unlikely(err < 0))
|
||||
if (unlikely(!xdpf)) {
|
||||
trace_xdp_exception(queue->info->netdev, prog, act);
|
||||
break;
|
||||
}
|
||||
get_page(pdata);
|
||||
err = xennet_xdp_xmit(queue->info->netdev, 1, &xdpf, 0);
|
||||
if (unlikely(err <= 0)) {
|
||||
if (err < 0)
|
||||
trace_xdp_exception(queue->info->netdev, prog, act);
|
||||
xdp_return_frame_rx_napi(xdpf);
|
||||
}
|
||||
break;
|
||||
case XDP_REDIRECT:
|
||||
get_page(pdata);
|
||||
err = xdp_do_redirect(queue->info->netdev, xdp, prog);
|
||||
*need_xdp_flush = true;
|
||||
if (unlikely(err))
|
||||
if (unlikely(err)) {
|
||||
trace_xdp_exception(queue->info->netdev, prog, act);
|
||||
xdp_return_buff(xdp);
|
||||
}
|
||||
break;
|
||||
case XDP_PASS:
|
||||
case XDP_DROP:
|
||||
|
|
|
@ -1318,6 +1318,7 @@ static const struct pci_device_id amd_ntb_pci_tbl[] = {
|
|||
{ PCI_VDEVICE(AMD, 0x148b), (kernel_ulong_t)&dev_data[1] },
|
||||
{ PCI_VDEVICE(AMD, 0x14c0), (kernel_ulong_t)&dev_data[1] },
|
||||
{ PCI_VDEVICE(AMD, 0x14c3), (kernel_ulong_t)&dev_data[1] },
|
||||
{ PCI_VDEVICE(AMD, 0x155a), (kernel_ulong_t)&dev_data[1] },
|
||||
{ PCI_VDEVICE(HYGON, 0x145b), (kernel_ulong_t)&dev_data[0] },
|
||||
{ 0, }
|
||||
};
|
||||
|
|
|
@ -1041,7 +1041,7 @@ static inline char *idt_get_mw_name(enum idt_mw_type mw_type)
|
|||
static struct idt_mw_cfg *idt_scan_mws(struct idt_ntb_dev *ndev, int port,
|
||||
unsigned char *mw_cnt)
|
||||
{
|
||||
struct idt_mw_cfg mws[IDT_MAX_NR_MWS], *ret_mws;
|
||||
struct idt_mw_cfg *mws;
|
||||
const struct idt_ntb_bar *bars;
|
||||
enum idt_mw_type mw_type;
|
||||
unsigned char widx, bidx, en_cnt;
|
||||
|
@ -1049,6 +1049,11 @@ static struct idt_mw_cfg *idt_scan_mws(struct idt_ntb_dev *ndev, int port,
|
|||
int aprt_size;
|
||||
u32 data;
|
||||
|
||||
mws = devm_kcalloc(&ndev->ntb.pdev->dev, IDT_MAX_NR_MWS,
|
||||
sizeof(*mws), GFP_KERNEL);
|
||||
if (!mws)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
||||
/* Retrieve the array of the BARs registers */
|
||||
bars = portdata_tbl[port].bars;
|
||||
|
||||
|
@ -1103,16 +1108,7 @@ static struct idt_mw_cfg *idt_scan_mws(struct idt_ntb_dev *ndev, int port,
|
|||
}
|
||||
}
|
||||
|
||||
/* Allocate memory for memory window descriptors */
|
||||
ret_mws = devm_kcalloc(&ndev->ntb.pdev->dev, *mw_cnt, sizeof(*ret_mws),
|
||||
GFP_KERNEL);
|
||||
if (!ret_mws)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
||||
/* Copy the info of detected memory windows */
|
||||
memcpy(ret_mws, mws, (*mw_cnt)*sizeof(*ret_mws));
|
||||
|
||||
return ret_mws;
|
||||
return mws;
|
||||
}
|
||||
|
||||
/*
|
||||
|
|
|
@ -3972,6 +3972,15 @@ static void nvme_scan_work(struct work_struct *work)
|
|||
nvme_scan_ns_sequential(ctrl);
|
||||
}
|
||||
mutex_unlock(&ctrl->scan_lock);
|
||||
|
||||
/* Requeue if we have missed AENs */
|
||||
if (test_bit(NVME_AER_NOTICE_NS_CHANGED, &ctrl->events))
|
||||
nvme_queue_scan(ctrl);
|
||||
#ifdef CONFIG_NVME_MULTIPATH
|
||||
else if (ctrl->ana_log_buf)
|
||||
/* Re-read the ANA log page to not miss updates */
|
||||
queue_work(nvme_wq, &ctrl->ana_work);
|
||||
#endif
|
||||
}
|
||||
|
||||
/*
|
||||
|
|
|
@ -426,7 +426,7 @@ static bool nvme_available_path(struct nvme_ns_head *head)
|
|||
struct nvme_ns *ns;
|
||||
|
||||
if (!test_bit(NVME_NSHEAD_DISK_LIVE, &head->flags))
|
||||
return NULL;
|
||||
return false;
|
||||
|
||||
list_for_each_entry_srcu(ns, &head->list, siblings,
|
||||
srcu_read_lock_held(&head->srcu)) {
|
||||
|
|
|
@ -1030,33 +1030,24 @@ nvmet_fc_alloc_hostport(struct nvmet_fc_tgtport *tgtport, void *hosthandle)
|
|||
struct nvmet_fc_hostport *newhost, *match = NULL;
|
||||
unsigned long flags;
|
||||
|
||||
/*
|
||||
* Caller holds a reference on tgtport.
|
||||
*/
|
||||
|
||||
/* if LLDD not implemented, leave as NULL */
|
||||
if (!hosthandle)
|
||||
return NULL;
|
||||
|
||||
/*
|
||||
* take reference for what will be the newly allocated hostport if
|
||||
* we end up using a new allocation
|
||||
*/
|
||||
if (!nvmet_fc_tgtport_get(tgtport))
|
||||
return ERR_PTR(-EINVAL);
|
||||
|
||||
spin_lock_irqsave(&tgtport->lock, flags);
|
||||
match = nvmet_fc_match_hostport(tgtport, hosthandle);
|
||||
spin_unlock_irqrestore(&tgtport->lock, flags);
|
||||
|
||||
if (match) {
|
||||
/* no new allocation - release reference */
|
||||
nvmet_fc_tgtport_put(tgtport);
|
||||
if (match)
|
||||
return match;
|
||||
}
|
||||
|
||||
newhost = kzalloc(sizeof(*newhost), GFP_KERNEL);
|
||||
if (!newhost) {
|
||||
/* no new allocation - release reference */
|
||||
nvmet_fc_tgtport_put(tgtport);
|
||||
if (!newhost)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
}
|
||||
|
||||
spin_lock_irqsave(&tgtport->lock, flags);
|
||||
match = nvmet_fc_match_hostport(tgtport, hosthandle);
|
||||
|
@ -1065,6 +1056,7 @@ nvmet_fc_alloc_hostport(struct nvmet_fc_tgtport *tgtport, void *hosthandle)
|
|||
kfree(newhost);
|
||||
newhost = match;
|
||||
} else {
|
||||
nvmet_fc_tgtport_get(tgtport);
|
||||
newhost->tgtport = tgtport;
|
||||
newhost->hosthandle = hosthandle;
|
||||
INIT_LIST_HEAD(&newhost->host_list);
|
||||
|
@ -1099,7 +1091,8 @@ static void
|
|||
nvmet_fc_schedule_delete_assoc(struct nvmet_fc_tgt_assoc *assoc)
|
||||
{
|
||||
nvmet_fc_tgtport_get(assoc->tgtport);
|
||||
queue_work(nvmet_wq, &assoc->del_work);
|
||||
if (!queue_work(nvmet_wq, &assoc->del_work))
|
||||
nvmet_fc_tgtport_put(assoc->tgtport);
|
||||
}
|
||||
|
||||
static struct nvmet_fc_tgt_assoc *
|
||||
|
|
|
@ -262,25 +262,22 @@ static int adjust_local_phandle_references(struct device_node *local_fixups,
|
|||
*/
|
||||
int of_resolve_phandles(struct device_node *overlay)
|
||||
{
|
||||
struct device_node *child, *local_fixups, *refnode;
|
||||
struct device_node *tree_symbols, *overlay_fixups;
|
||||
struct device_node *child, *refnode;
|
||||
struct device_node *overlay_fixups;
|
||||
struct device_node __free(device_node) *local_fixups = NULL;
|
||||
struct property *prop;
|
||||
const char *refpath;
|
||||
phandle phandle, phandle_delta;
|
||||
int err;
|
||||
|
||||
tree_symbols = NULL;
|
||||
|
||||
if (!overlay) {
|
||||
pr_err("null overlay\n");
|
||||
err = -EINVAL;
|
||||
goto out;
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (!of_node_check_flag(overlay, OF_DETACHED)) {
|
||||
pr_err("overlay not detached\n");
|
||||
err = -EINVAL;
|
||||
goto out;
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
phandle_delta = live_tree_max_phandle() + 1;
|
||||
|
@ -292,7 +289,7 @@ int of_resolve_phandles(struct device_node *overlay)
|
|||
|
||||
err = adjust_local_phandle_references(local_fixups, overlay, phandle_delta);
|
||||
if (err)
|
||||
goto out;
|
||||
return err;
|
||||
|
||||
overlay_fixups = NULL;
|
||||
|
||||
|
@ -301,16 +298,13 @@ int of_resolve_phandles(struct device_node *overlay)
|
|||
overlay_fixups = child;
|
||||
}
|
||||
|
||||
if (!overlay_fixups) {
|
||||
err = 0;
|
||||
goto out;
|
||||
}
|
||||
if (!overlay_fixups)
|
||||
return 0;
|
||||
|
||||
tree_symbols = of_find_node_by_path("/__symbols__");
|
||||
struct device_node __free(device_node) *tree_symbols = of_find_node_by_path("/__symbols__");
|
||||
if (!tree_symbols) {
|
||||
pr_err("no symbols in root of device tree.\n");
|
||||
err = -EINVAL;
|
||||
goto out;
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
for_each_property_of_node(overlay_fixups, prop) {
|
||||
|
@ -324,14 +318,12 @@ int of_resolve_phandles(struct device_node *overlay)
|
|||
if (err) {
|
||||
pr_err("node label '%s' not found in live devicetree symbols table\n",
|
||||
prop->name);
|
||||
goto out;
|
||||
return err;
|
||||
}
|
||||
|
||||
refnode = of_find_node_by_path(refpath);
|
||||
if (!refnode) {
|
||||
err = -ENOENT;
|
||||
goto out;
|
||||
}
|
||||
if (!refnode)
|
||||
return -ENOENT;
|
||||
|
||||
phandle = refnode->phandle;
|
||||
of_node_put(refnode);
|
||||
|
@ -341,11 +333,8 @@ int of_resolve_phandles(struct device_node *overlay)
|
|||
break;
|
||||
}
|
||||
|
||||
out:
|
||||
if (err)
|
||||
pr_err("overlay phandle fixup failed: %d\n", err);
|
||||
of_node_put(tree_symbols);
|
||||
|
||||
return err;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(of_resolve_phandles);
|
||||
|
|
|
@ -885,6 +885,7 @@ static int pci_register_host_bridge(struct pci_host_bridge *bridge)
|
|||
resource_size_t offset, next_offset;
|
||||
LIST_HEAD(resources);
|
||||
struct resource *res, *next_res;
|
||||
bool bus_registered = false;
|
||||
char addr[64], *fmt;
|
||||
const char *name;
|
||||
int err;
|
||||
|
@ -948,6 +949,7 @@ static int pci_register_host_bridge(struct pci_host_bridge *bridge)
|
|||
name = dev_name(&bus->dev);
|
||||
|
||||
err = device_register(&bus->dev);
|
||||
bus_registered = true;
|
||||
if (err)
|
||||
goto unregister;
|
||||
|
||||
|
@ -1031,12 +1033,15 @@ static int pci_register_host_bridge(struct pci_host_bridge *bridge)
|
|||
unregister:
|
||||
put_device(&bridge->dev);
|
||||
device_del(&bridge->dev);
|
||||
|
||||
free:
|
||||
#ifdef CONFIG_PCI_DOMAINS_GENERIC
|
||||
pci_bus_release_domain_nr(bus, parent);
|
||||
#endif
|
||||
kfree(bus);
|
||||
if (bus_registered)
|
||||
put_device(&bus->dev);
|
||||
else
|
||||
kfree(bus);
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
|
|
|
@ -243,6 +243,9 @@ static int rza2_gpio_register(struct rza2_pinctrl_priv *priv)
|
|||
int ret;
|
||||
|
||||
chip.label = devm_kasprintf(priv->dev, GFP_KERNEL, "%pOFn", np);
|
||||
if (!chip.label)
|
||||
return -ENOMEM;
|
||||
|
||||
chip.parent = priv->dev;
|
||||
chip.ngpio = priv->npins;
|
||||
|
||||
|
|
|
@ -267,8 +267,8 @@ static const unsigned int rk817_buck1_4_ramp_table[] = {
|
|||
|
||||
static int rk806_set_mode_dcdc(struct regulator_dev *rdev, unsigned int mode)
|
||||
{
|
||||
int rid = rdev_get_id(rdev);
|
||||
int ctr_bit, reg;
|
||||
unsigned int rid = rdev_get_id(rdev);
|
||||
unsigned int ctr_bit, reg;
|
||||
|
||||
reg = RK806_POWER_FPWM_EN0 + rid / 8;
|
||||
ctr_bit = rid % 8;
|
||||
|
|
|
@ -35,6 +35,7 @@
|
|||
#define PCF85063_REG_CTRL1_CAP_SEL BIT(0)
|
||||
#define PCF85063_REG_CTRL1_STOP BIT(5)
|
||||
#define PCF85063_REG_CTRL1_EXT_TEST BIT(7)
|
||||
#define PCF85063_REG_CTRL1_SWR 0x58
|
||||
|
||||
#define PCF85063_REG_CTRL2 0x01
|
||||
#define PCF85063_CTRL2_AF BIT(6)
|
||||
|
@ -589,7 +590,7 @@ static int pcf85063_probe(struct i2c_client *client)
|
|||
|
||||
i2c_set_clientdata(client, pcf85063);
|
||||
|
||||
err = regmap_read(pcf85063->regmap, PCF85063_REG_CTRL1, &tmp);
|
||||
err = regmap_read(pcf85063->regmap, PCF85063_REG_SC, &tmp);
|
||||
if (err) {
|
||||
dev_err(&client->dev, "RTC chip is not present\n");
|
||||
return err;
|
||||
|
@ -599,6 +600,22 @@ static int pcf85063_probe(struct i2c_client *client)
|
|||
if (IS_ERR(pcf85063->rtc))
|
||||
return PTR_ERR(pcf85063->rtc);
|
||||
|
||||
/*
|
||||
* If a Power loss is detected, SW reset the device.
|
||||
* From PCF85063A datasheet:
|
||||
* There is a low probability that some devices will have corruption
|
||||
* of the registers after the automatic power-on reset...
|
||||
*/
|
||||
if (tmp & PCF85063_REG_SC_OS) {
|
||||
dev_warn(&client->dev,
|
||||
"POR issue detected, sending a SW reset\n");
|
||||
err = regmap_write(pcf85063->regmap, PCF85063_REG_CTRL1,
|
||||
PCF85063_REG_CTRL1_SWR);
|
||||
if (err < 0)
|
||||
dev_warn(&client->dev,
|
||||
"SW reset failed, trying to continue\n");
|
||||
}
|
||||
|
||||
err = pcf85063_load_capacitance(pcf85063, client->dev.of_node,
|
||||
config->force_cap_7000 ? 7000 : 0);
|
||||
if (err < 0)
|
||||
|
|
|
@ -263,6 +263,19 @@ static struct console sclp_console =
|
|||
.index = 0 /* ttyS0 */
|
||||
};
|
||||
|
||||
/*
|
||||
* Release allocated pages.
|
||||
*/
|
||||
static void __init __sclp_console_free_pages(void)
|
||||
{
|
||||
struct list_head *page, *p;
|
||||
|
||||
list_for_each_safe(page, p, &sclp_con_pages) {
|
||||
list_del(page);
|
||||
free_page((unsigned long)page);
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* called by console_init() in drivers/char/tty_io.c at boot-time.
|
||||
*/
|
||||
|
@ -282,6 +295,10 @@ sclp_console_init(void)
|
|||
/* Allocate pages for output buffering */
|
||||
for (i = 0; i < sclp_console_pages; i++) {
|
||||
page = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
|
||||
if (!page) {
|
||||
__sclp_console_free_pages();
|
||||
return -ENOMEM;
|
||||
}
|
||||
list_add_tail(page, &sclp_con_pages);
|
||||
}
|
||||
sclp_conbuf = NULL;
|
||||
|
|
|
@ -490,6 +490,17 @@ static const struct tty_operations sclp_ops = {
|
|||
.flush_buffer = sclp_tty_flush_buffer,
|
||||
};
|
||||
|
||||
/* Release allocated pages. */
|
||||
static void __init __sclp_tty_free_pages(void)
|
||||
{
|
||||
struct list_head *page, *p;
|
||||
|
||||
list_for_each_safe(page, p, &sclp_tty_pages) {
|
||||
list_del(page);
|
||||
free_page((unsigned long)page);
|
||||
}
|
||||
}
|
||||
|
||||
static int __init
|
||||
sclp_tty_init(void)
|
||||
{
|
||||
|
@ -516,6 +527,7 @@ sclp_tty_init(void)
|
|||
for (i = 0; i < MAX_KMEM_PAGES; i++) {
|
||||
page = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
|
||||
if (page == NULL) {
|
||||
__sclp_tty_free_pages();
|
||||
tty_driver_kref_put(driver);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
|
|
@ -911,8 +911,28 @@ static void hisi_sas_phyup_work_common(struct work_struct *work,
|
|||
container_of(work, typeof(*phy), works[event]);
|
||||
struct hisi_hba *hisi_hba = phy->hisi_hba;
|
||||
struct asd_sas_phy *sas_phy = &phy->sas_phy;
|
||||
struct asd_sas_port *sas_port = sas_phy->port;
|
||||
struct hisi_sas_port *port = phy->port;
|
||||
struct device *dev = hisi_hba->dev;
|
||||
struct domain_device *port_dev;
|
||||
int phy_no = sas_phy->id;
|
||||
|
||||
if (!test_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags) &&
|
||||
sas_port && port && (port->id != phy->port_id)) {
|
||||
dev_info(dev, "phy%d's hw port id changed from %d to %llu\n",
|
||||
phy_no, port->id, phy->port_id);
|
||||
port_dev = sas_port->port_dev;
|
||||
if (port_dev && !dev_is_expander(port_dev->dev_type)) {
|
||||
/*
|
||||
* Set the device state to gone to block
|
||||
* sending IO to the device.
|
||||
*/
|
||||
set_bit(SAS_DEV_GONE, &port_dev->state);
|
||||
hisi_sas_notify_phy_event(phy, HISI_PHYE_LINK_RESET);
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
phy->wait_phyup_cnt = 0;
|
||||
if (phy->identify.target_port_protocols == SAS_PROTOCOL_SSP)
|
||||
hisi_hba->hw->sl_notify_ssp(hisi_hba, phy_no);
|
||||
|
|
|
@ -719,6 +719,7 @@ static void pm8001_dev_gone_notify(struct domain_device *dev)
|
|||
spin_lock_irqsave(&pm8001_ha->lock, flags);
|
||||
}
|
||||
PM8001_CHIP_DISP->dereg_dev_req(pm8001_ha, device_id);
|
||||
pm8001_ha->phy[pm8001_dev->attached_phy].phy_attached = 0;
|
||||
pm8001_free_dev(pm8001_dev);
|
||||
} else {
|
||||
pm8001_dbg(pm8001_ha, DISC, "Found dev has gone.\n");
|
||||
|
|