mirror of git://git.yoctoproject.org/linux-yocto.git
synced 2025-10-23 07:23:12 +02:00

Merge branch 'v5.7/base' into v5.7/standard/intel-x86

This commit is contained in:
commit 5b91fd13b0
Makefile | 2
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 5
PATCHLEVEL = 7
SUBLEVEL = 17
SUBLEVEL = 18
EXTRAVERSION =
NAME = Kleptomaniac Octopus
@@ -490,10 +490,10 @@ extern inline void writeq(u64 b, volatile void __iomem *addr)
}
#endif
#define ioread16be(p) be16_to_cpu(ioread16(p))
#define ioread32be(p) be32_to_cpu(ioread32(p))
#define iowrite16be(v,p) iowrite16(cpu_to_be16(v), (p))
#define iowrite32be(v,p) iowrite32(cpu_to_be32(v), (p))
#define ioread16be(p) swab16(ioread16(p))
#define ioread32be(p) swab32(ioread32(p))
#define iowrite16be(v,p) iowrite16(swab16(v), (p))
#define iowrite32be(v,p) iowrite32(swab32(v), (p))
#define inb_p inb
#define inw_p inw
@@ -156,6 +156,7 @@ zinstall install:
PHONY += vdso_install
vdso_install:
$(Q)$(MAKE) $(build)=arch/arm64/kernel/vdso $@
$(Q)$(MAKE) $(build)=arch/arm64/kernel/vdso32 $@
# We use MRPROPER_FILES and CLEAN_FILES now
archclean:
@@ -440,7 +440,7 @@ int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
#define KVM_ARCH_WANT_MMU_NOTIFIER
int kvm_unmap_hva_range(struct kvm *kvm,
unsigned long start, unsigned long end);
unsigned long start, unsigned long end, unsigned flags);
int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
@@ -201,7 +201,7 @@ quiet_cmd_vdsosym = VDSOSYM $@
cmd_vdsosym = $(NM) $< | $(gen-vdsosym) | LC_ALL=C sort > $@
# Install commands for the unstripped file
quiet_cmd_vdso_install = INSTALL $@
quiet_cmd_vdso_install = INSTALL32 $@
cmd_vdso_install = cp $(obj)/$@.dbg $(MODLIB)/vdso/vdso32.so
vdso.so: $(obj)/vdso.so.dbg
@@ -89,9 +89,9 @@
* coherency though in all cases. And for copyback caches we will need
* to push cached data as well.
*/
#define CACHE_INIT CACR_CINVA
#define CACHE_INVALIDATE CACR_CINVA
#define CACHE_INVALIDATED CACR_CINVA
#define CACHE_INIT (CACHE_MODE + CACR_CINVA - CACR_EC)
#define CACHE_INVALIDATE (CACHE_MODE + CACR_CINVA)
#define CACHE_INVALIDATED (CACHE_MODE + CACR_CINVA)
#define ACR0_MODE ((CONFIG_RAMBASE & 0xff000000) + \
(0x000f0000) + \
@@ -939,7 +939,7 @@ enum kvm_mips_fault_result kvm_trap_emul_gva_fault(struct kvm_vcpu *vcpu,
#define KVM_ARCH_WANT_MMU_NOTIFIER
int kvm_unmap_hva_range(struct kvm *kvm,
unsigned long start, unsigned long end);
unsigned long start, unsigned long end, unsigned flags);
int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
@@ -497,7 +497,7 @@ static void __init mips_parse_crashkernel(void)
if (ret != 0 || crash_size <= 0)
return;
if (!memblock_find_in_range(crash_base, crash_base + crash_size, crash_size, 0)) {
if (!memblock_find_in_range(crash_base, crash_base + crash_size, crash_size, 1)) {
pr_warn("Invalid memory region reserved for crash kernel\n");
return;
}
@@ -518,7 +518,8 @@ static int kvm_unmap_hva_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end,
return 1;
}
int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end,
unsigned flags)
{
handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, NULL);
@@ -52,7 +52,7 @@ enum fixed_addresses {
FIX_HOLE,
/* reserve the top 128K for early debugging purposes */
FIX_EARLY_DEBUG_TOP = FIX_HOLE,
FIX_EARLY_DEBUG_BASE = FIX_EARLY_DEBUG_TOP+(ALIGN(SZ_128, PAGE_SIZE)/PAGE_SIZE)-1,
FIX_EARLY_DEBUG_BASE = FIX_EARLY_DEBUG_TOP+(ALIGN(SZ_128K, PAGE_SIZE)/PAGE_SIZE)-1,
#ifdef CONFIG_HIGHMEM
FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */
FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1,
@@ -58,7 +58,8 @@
#define KVM_ARCH_WANT_MMU_NOTIFIER
extern int kvm_unmap_hva_range(struct kvm *kvm,
unsigned long start, unsigned long end);
unsigned long start, unsigned long end,
unsigned flags);
extern int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
extern int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
extern int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
@@ -837,7 +837,8 @@ void kvmppc_core_commit_memory_region(struct kvm *kvm,
kvm->arch.kvm_ops->commit_memory_region(kvm, mem, old, new, change);
}
int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end,
unsigned flags)
{
return kvm->arch.kvm_ops->unmap_hva_range(kvm, start, end);
}
@@ -734,7 +734,8 @@ static int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
return 0;
}
int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end,
unsigned flags)
{
/* kvm_unmap_hva flushes everything anyways */
kvm_unmap_hva(kvm, start);
@@ -107,22 +107,28 @@ static int pseries_cpu_disable(void)
*/
static void pseries_cpu_die(unsigned int cpu)
{
int tries;
int cpu_status = 1;
unsigned int pcpu = get_hard_smp_processor_id(cpu);
unsigned long timeout = jiffies + msecs_to_jiffies(120000);
for (tries = 0; tries < 25; tries++) {
while (true) {
cpu_status = smp_query_cpu_stopped(pcpu);
if (cpu_status == QCSS_STOPPED ||
cpu_status == QCSS_HARDWARE_ERROR)
break;
cpu_relax();
if (time_after(jiffies, timeout)) {
pr_warn("CPU %i (hwid %i) didn't die after 120 seconds\n",
cpu, pcpu);
timeout = jiffies + msecs_to_jiffies(120000);
}
if (cpu_status != 0) {
printk("Querying DEAD? cpu %i (%i) shows %i\n",
cpu, pcpu, cpu_status);
cond_resched();
}
if (cpu_status == QCSS_HARDWARE_ERROR) {
pr_warn("CPU %i (hwid %i) reported error while dying\n",
cpu, pcpu);
}
/* Isolation and deallocation are definitely done by
@@ -184,7 +184,6 @@ static void handle_system_shutdown(char event_modifier)
case EPOW_SHUTDOWN_ON_UPS:
pr_emerg("Loss of system power detected. System is running on"
" UPS/battery. Check RTAS error log for details\n");
orderly_poweroff(true);
break;
case EPOW_SHUTDOWN_LOSS_OF_CRITICAL_FUNCTIONS:
@@ -22,6 +22,7 @@ SECTIONS
/* Beginning of code and text segment */
. = LOAD_OFFSET;
_start = .;
_stext = .;
HEAD_TEXT_SECTION
. = ALIGN(PAGE_SIZE);

@@ -49,7 +50,6 @@ SECTIONS
. = ALIGN(SECTION_ALIGN);
.text : {
_text = .;
_stext = .;
TEXT_TEXT
SCHED_TEXT
CPUIDLE_TEXT
@@ -1311,7 +1311,6 @@ static bool is_ri_cb_valid(struct runtime_instr_cb *cb)
cb->pc == 1 &&
cb->qc == 0 &&
cb->reserved2 == 0 &&
cb->key == PAGE_DEFAULT_KEY &&
cb->reserved3 == 0 &&
cb->reserved4 == 0 &&
cb->reserved5 == 0 &&

@@ -1375,7 +1374,11 @@ static int s390_runtime_instr_set(struct task_struct *target,
kfree(data);
return -EINVAL;
}
/*
* Override access key in any case, since user space should
* not be able to set it, nor should it care about it.
*/
ri_cb.key = PAGE_DEFAULT_KEY >> 4;
preempt_disable();
if (!target->thread.ri_cb)
target->thread.ri_cb = data;
@@ -57,7 +57,7 @@ static void init_runtime_instr_cb(struct runtime_instr_cb *cb)
cb->k = 1;
cb->ps = 1;
cb->pc = 1;
cb->key = PAGE_DEFAULT_KEY;
cb->key = PAGE_DEFAULT_KEY >> 4;
cb->v = 1;
}
@@ -1606,7 +1606,8 @@ asmlinkage void kvm_spurious_fault(void);
_ASM_EXTABLE(666b, 667b)
#define KVM_ARCH_WANT_MMU_NOTIFIER
int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end);
int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end,
unsigned flags);
int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
@@ -1972,7 +1972,8 @@ static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
return kvm_handle_hva_range(kvm, hva, hva + 1, data, handler);
}
int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end,
unsigned flags)
{
return kvm_handle_hva_range(kvm, start, end, 0, kvm_unmap_rmapp);
}
@@ -956,7 +956,7 @@ int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
unsigned long old_cr4 = kvm_read_cr4(vcpu);
unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE |
X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_PKE;
X86_CR4_SMEP;
if (kvm_valid_cr4(vcpu, cr4))
return 1;
@@ -26,6 +26,7 @@
#include <asm/xen/pci.h>
#include <asm/xen/cpuid.h>
#include <asm/apic.h>
#include <asm/acpi.h>
#include <asm/i8259.h>
static int xen_pcifront_enable_irq(struct pci_dev *dev)
@@ -269,6 +269,8 @@ int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
npages = (__end_rodata - __start_rodata) >> PAGE_SHIFT;
rodata = __pa(__start_rodata);
pfn = rodata >> PAGE_SHIFT;
pf = _PAGE_NX | _PAGE_ENC;
if (kernel_map_pages_in_pgd(pgd, pfn, rodata, npages, pf)) {
pr_err("Failed to map kernel rodata 1:1\n");
return 1;
@@ -1572,6 +1572,7 @@ static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
intel_pstate_get_hwp_max(cpu->cpu, &phy_max, &current_max);
cpu->pstate.turbo_freq = phy_max * cpu->pstate.scaling;
cpu->pstate.turbo_pstate = phy_max;
} else {
cpu->pstate.turbo_freq = cpu->pstate.turbo_pstate * cpu->pstate.scaling;
}
@@ -1710,9 +1710,9 @@ static void i7core_mce_output_error(struct mem_ctl_info *mci,
if (uncorrected_error) {
core_err_cnt = 1;
if (ripv)
tp_event = HW_EVENT_ERR_FATAL;
else
tp_event = HW_EVENT_ERR_UNCORRECTED;
else
tp_event = HW_EVENT_ERR_FATAL;
} else {
tp_event = HW_EVENT_ERR_CORRECTED;
}
@@ -1155,7 +1155,7 @@ static void pnd2_mce_output_error(struct mem_ctl_info *mci, const struct mce *m,
u32 optypenum = GET_BITFIELD(m->status, 4, 6);
int rc;
tp_event = uc_err ? (ripv ? HW_EVENT_ERR_FATAL : HW_EVENT_ERR_UNCORRECTED) :
tp_event = uc_err ? (ripv ? HW_EVENT_ERR_UNCORRECTED : HW_EVENT_ERR_FATAL) :
HW_EVENT_ERR_CORRECTED;
/*
@@ -2982,9 +2982,9 @@ static void sbridge_mce_output_error(struct mem_ctl_info *mci,
if (uncorrected_error) {
core_err_cnt = 1;
if (ripv) {
tp_event = HW_EVENT_ERR_FATAL;
} else {
tp_event = HW_EVENT_ERR_UNCORRECTED;
} else {
tp_event = HW_EVENT_ERR_FATAL;
}
} else {
tp_event = HW_EVENT_ERR_CORRECTED;
@@ -494,9 +494,9 @@ static void skx_mce_output_error(struct mem_ctl_info *mci,
if (uncorrected_error) {
core_err_cnt = 1;
if (ripv) {
tp_event = HW_EVENT_ERR_FATAL;
} else {
tp_event = HW_EVENT_ERR_UNCORRECTED;
} else {
tp_event = HW_EVENT_ERR_FATAL;
}
} else {
tp_event = HW_EVENT_ERR_CORRECTED;
@@ -381,6 +381,7 @@ static int __init efisubsys_init(void)
efi_kobj = kobject_create_and_add("efi", firmware_kobj);
if (!efi_kobj) {
pr_err("efi: Firmware registration failed.\n");
destroy_workqueue(efi_rts_wq);
return -ENOMEM;
}

@@ -424,6 +425,7 @@ err_unregister:
generic_ops_unregister();
err_put:
kobject_put(efi_kobj);
destroy_workqueue(efi_rts_wq);
return error;
}
@@ -73,10 +73,14 @@ void efi_printk(char *str)
*/
efi_status_t efi_parse_options(char const *cmdline)
{
size_t len = strlen(cmdline) + 1;
size_t len;
efi_status_t status;
char *str, *buf;
if (!cmdline)
return EFI_SUCCESS;
len = strlen(cmdline) + 1;
status = efi_bs_call(allocate_pool, EFI_LOADER_DATA, len, (void **)&buf);
if (status != EFI_SUCCESS)
return status;

@@ -87,6 +91,8 @@ efi_status_t efi_parse_options(char const *cmdline)
char *param, *val;
str = next_arg(str, &param, &val);
if (!val && !strcmp(param, "--"))
break;
if (!strcmp(param, "nokaslr")) {
efi_nokaslr = true;
@@ -1983,6 +1983,7 @@ void amdgpu_dm_update_connector_after_detect(
drm_connector_update_edid_property(connector,
aconnector->edid);
drm_add_edid_modes(connector, aconnector->edid);
if (aconnector->dc_link->aux_mode)
drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
@@ -3113,12 +3113,11 @@ void core_link_disable_stream(struct pipe_ctx *pipe_ctx)
dc_is_virtual_signal(pipe_ctx->stream->signal))
return;
dc->hwss.blank_stream(pipe_ctx);
#if defined(CONFIG_DRM_AMD_DC_HDCP)
update_psp_stream_config(pipe_ctx, true);
#endif
dc->hwss.blank_stream(pipe_ctx);
if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
deallocate_mst_payload(pipe_ctx);

@@ -3146,11 +3145,9 @@ void core_link_disable_stream(struct pipe_ctx *pipe_ctx)
write_i2c_redriver_setting(pipe_ctx, false);
}
}
disable_link(pipe_ctx->stream->link, pipe_ctx->stream->signal);
dc->hwss.disable_stream(pipe_ctx);
disable_link(pipe_ctx->stream->link, pipe_ctx->stream->signal);
if (pipe_ctx->stream->timing.flags.DSC) {
if (dc_is_dp_signal(pipe_ctx->stream->signal))
dp_set_dsc_enable(pipe_ctx, false);
@@ -1103,10 +1103,6 @@ static inline enum link_training_result perform_link_training_int(
dpcd_pattern.v1_4.TRAINING_PATTERN_SET = DPCD_TRAINING_PATTERN_VIDEOIDLE;
dpcd_set_training_pattern(link, dpcd_pattern);
/* delay 5ms after notifying sink of idle pattern before switching output */
if (link->connector_signal != SIGNAL_TYPE_EDP)
msleep(5);
/* 4. mainlink output idle pattern*/
dp_set_hw_test_pattern(link, DP_TEST_PATTERN_VIDEO_MODE, NULL, 0);

@@ -1556,12 +1552,6 @@ bool perform_link_training_with_retries(
struct dc_link *link = stream->link;
enum dp_panel_mode panel_mode = dp_get_panel_mode(link);
/* We need to do this before the link training to ensure the idle pattern in SST
* mode will be sent right after the link training
*/
link->link_enc->funcs->connect_dig_be_to_fe(link->link_enc,
pipe_ctx->stream_res.stream_enc->id, true);
for (j = 0; j < attempts; ++j) {
dp_enable_link_phy(

@@ -1578,6 +1568,12 @@ bool perform_link_training_with_retries(
dp_set_panel_mode(link, panel_mode);
/* We need to do this before the link training to ensure the idle pattern in SST
* mode will be sent right after the link training
*/
link->link_enc->funcs->connect_dig_be_to_fe(link->link_enc,
pipe_ctx->stream_res.stream_enc->id, true);
if (link->aux_access_disabled) {
dc_link_dp_perform_link_training_skip_aux(link, link_setting);
return true;
@@ -1090,17 +1090,8 @@ void dce110_blank_stream(struct pipe_ctx *pipe_ctx)
dc_link_set_abm_disable(link);
}
if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
if (dc_is_dp_signal(pipe_ctx->stream->signal))
pipe_ctx->stream_res.stream_enc->funcs->dp_blank(pipe_ctx->stream_res.stream_enc);
/*
* After output is idle pattern some sinks need time to recognize the stream
* has changed or they enter protection state and hang.
*/
if (!dc_is_embedded_signal(pipe_ctx->stream->signal))
msleep(60);
}
}
@@ -1386,8 +1386,8 @@ static void dcn20_update_dchubp_dpp(
/* Any updates are handled in dc interface, just need to apply existing for plane enable */
if ((pipe_ctx->update_flags.bits.enable || pipe_ctx->update_flags.bits.opp_changed ||
pipe_ctx->update_flags.bits.scaler || pipe_ctx->update_flags.bits.viewport)
&& pipe_ctx->stream->cursor_attributes.address.quad_part != 0) {
pipe_ctx->update_flags.bits.scaler || viewport_changed == true) &&
pipe_ctx->stream->cursor_attributes.address.quad_part != 0) {
dc->hwss.set_cursor_position(pipe_ctx);
dc->hwss.set_cursor_attribute(pipe_ctx);
@@ -3031,7 +3031,7 @@ static bool dcn20_validate_bandwidth_internal(struct dc *dc, struct dc_state *co
int vlevel = 0;
int pipe_split_from[MAX_PIPES];
int pipe_cnt = 0;
display_e2e_pipe_params_st *pipes = kzalloc(dc->res_pool->pipe_count * sizeof(display_e2e_pipe_params_st), GFP_KERNEL);
display_e2e_pipe_params_st *pipes = kzalloc(dc->res_pool->pipe_count * sizeof(display_e2e_pipe_params_st), GFP_ATOMIC);
DC_LOGGER_INIT(dc->ctx->logger);
BW_VAL_TRACE_COUNT();
@@ -431,6 +431,9 @@ struct fixed31_32 dc_fixpt_log(struct fixed31_32 arg);
*/
static inline struct fixed31_32 dc_fixpt_pow(struct fixed31_32 arg1, struct fixed31_32 arg2)
{
if (arg1.value == 0)
return arg2.value == 0 ? dc_fixpt_one : dc_fixpt_zero;
return dc_fixpt_exp(
dc_fixpt_mul(
dc_fixpt_log(arg1),
@@ -1537,7 +1537,7 @@ static const struct drm_display_mode frida_frd350h54004_mode = {
.vsync_end = 240 + 2 + 6,
.vtotal = 240 + 2 + 6 + 2,
.vrefresh = 60,
.flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC,
.flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
};
static const struct panel_desc frida_frd350h54004 = {
@@ -505,8 +505,10 @@ static int ttm_bo_vm_access_kmap(struct ttm_buffer_object *bo,
int ttm_bo_vm_access(struct vm_area_struct *vma, unsigned long addr,
void *buf, int len, int write)
{
unsigned long offset = (addr) - vma->vm_start;
struct ttm_buffer_object *bo = vma->vm_private_data;
unsigned long offset = (addr) - vma->vm_start +
((vma->vm_pgoff - drm_vma_node_start(&bo->base.vma_node))
<< PAGE_SHIFT);
int ret;
if (len < 1 || (offset + len) >> PAGE_SHIFT > bo->num_pages)
@@ -229,32 +229,6 @@ static int vgem_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
return 0;
}
static int vgem_gem_dumb_map(struct drm_file *file, struct drm_device *dev,
uint32_t handle, uint64_t *offset)
{
struct drm_gem_object *obj;
int ret;
obj = drm_gem_object_lookup(file, handle);
if (!obj)
return -ENOENT;
if (!obj->filp) {
ret = -EINVAL;
goto unref;
}
ret = drm_gem_create_mmap_offset(obj);
if (ret)
goto unref;
*offset = drm_vma_node_offset_addr(&obj->vma_node);
unref:
drm_gem_object_put_unlocked(obj);
return ret;
}
static struct drm_ioctl_desc vgem_ioctls[] = {
DRM_IOCTL_DEF_DRV(VGEM_FENCE_ATTACH, vgem_fence_attach_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(VGEM_FENCE_SIGNAL, vgem_fence_signal_ioctl, DRM_RENDER_ALLOW),

@@ -448,7 +422,6 @@ static struct drm_driver vgem_driver = {
.fops = &vgem_driver_fops,
.dumb_create = vgem_gem_dumb_create,
.dumb_map_offset = vgem_gem_dumb_map,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
@@ -180,6 +180,7 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
virtio_gpu_cmd_submit(vgdev, buf, exbuf->size,
vfpriv->ctx_id, buflist, out_fence);
dma_fence_put(&out_fence->f);
virtio_gpu_notify(vgdev);
return 0;
@@ -811,7 +811,8 @@ static int bnxt_re_handle_qp_async_event(struct creq_qp_event *qp_event,
struct ib_event event;
unsigned int flags;
if (qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
if (qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_ERR &&
rdma_is_kernel_res(&qp->ib_qp.res)) {
flags = bnxt_re_lock_cqs(qp);
bnxt_qplib_add_flush_qp(&qp->qplib_qp);
bnxt_re_unlock_cqs(qp, flags);
@@ -3215,6 +3215,7 @@ bool hfi1_tid_rdma_wqe_interlock(struct rvt_qp *qp, struct rvt_swqe *wqe)
case IB_WR_ATOMIC_CMP_AND_SWP:
case IB_WR_ATOMIC_FETCH_AND_ADD:
case IB_WR_RDMA_WRITE:
case IB_WR_RDMA_WRITE_WITH_IMM:
switch (prev->wr.opcode) {
case IB_WR_TID_RDMA_WRITE:
req = wqe_to_tid_req(prev);
@@ -2042,7 +2042,7 @@ static int psmouse_get_maxproto(char *buffer, const struct kernel_param *kp)
{
int type = *((unsigned int *)kp->arg);
return sprintf(buffer, "%s", psmouse_protocol_by_type(type)->name);
return sprintf(buffer, "%s\n", psmouse_protocol_by_type(type)->name);
}
static int __init psmouse_init(void)
@@ -825,19 +825,19 @@ static int bcache_device_init(struct bcache_device *d, unsigned int block_size,
struct request_queue *q;
const size_t max_stripes = min_t(size_t, INT_MAX,
SIZE_MAX / sizeof(atomic_t));
size_t n;
uint64_t n;
int idx;
if (!d->stripe_size)
d->stripe_size = 1 << 31;
d->nr_stripes = DIV_ROUND_UP_ULL(sectors, d->stripe_size);
if (!d->nr_stripes || d->nr_stripes > max_stripes) {
pr_err("nr_stripes too large or invalid: %u (start sector beyond end of disk?)",
(unsigned int)d->nr_stripes);
n = DIV_ROUND_UP_ULL(sectors, d->stripe_size);
if (!n || n > max_stripes) {
pr_err("nr_stripes too large or invalid: %llu (start sector beyond end of disk?)\n",
n);
return -ENOMEM;
}
d->nr_stripes = n;
n = d->nr_stripes * sizeof(atomic_t);
d->stripe_sectors_dirty = kvzalloc(n, GFP_KERNEL);
@@ -369,20 +369,25 @@ static int budget_register(struct budget *budget)
ret = dvbdemux->dmx.add_frontend(&dvbdemux->dmx, &budget->hw_frontend);
if (ret < 0)
return ret;
goto err_release_dmx;
budget->mem_frontend.source = DMX_MEMORY_FE;
ret = dvbdemux->dmx.add_frontend(&dvbdemux->dmx, &budget->mem_frontend);
if (ret < 0)
return ret;
goto err_release_dmx;
ret = dvbdemux->dmx.connect_frontend(&dvbdemux->dmx, &budget->hw_frontend);
if (ret < 0)
return ret;
goto err_release_dmx;
dvb_net_init(&budget->dvb_adapter, &budget->dvb_net, &dvbdemux->dmx);
return 0;
err_release_dmx:
dvb_dmxdev_release(&budget->dmxdev);
dvb_dmx_release(&budget->demux);
return ret;
}
static void budget_unregister(struct budget *budget)
@@ -505,19 +505,31 @@ static void vpss_exit(void)
static int __init vpss_init(void)
{
int ret;
if (!request_mem_region(VPSS_CLK_CTRL, 4, "vpss_clock_control"))
return -EBUSY;
oper_cfg.vpss_regs_base2 = ioremap(VPSS_CLK_CTRL, 4);
if (unlikely(!oper_cfg.vpss_regs_base2)) {
release_mem_region(VPSS_CLK_CTRL, 4);
return -ENOMEM;
ret = -ENOMEM;
goto err_ioremap;
}
writel(VPSS_CLK_CTRL_VENCCLKEN |
VPSS_CLK_CTRL_DACCLKEN, oper_cfg.vpss_regs_base2);
return platform_driver_register(&vpss_driver);
ret = platform_driver_register(&vpss_driver);
if (ret)
goto err_pd_register;
return 0;
err_pd_register:
iounmap(oper_cfg.vpss_regs_base2);
err_ioremap:
release_mem_region(VPSS_CLK_CTRL, 4);
return ret;
}
subsys_initcall(vpss_init);
module_exit(vpss_exit);
@@ -504,7 +504,6 @@ static int camss_of_parse_ports(struct camss *camss)
return num_subdevs;
err_cleanup:
v4l2_async_notifier_cleanup(&camss->notifier);
of_node_put(node);
return ret;
}

@@ -835,29 +834,38 @@ static int camss_probe(struct platform_device *pdev)
camss->csid_num = 4;
camss->vfe_num = 2;
} else {
return -EINVAL;
ret = -EINVAL;
goto err_free;
}
camss->csiphy = devm_kcalloc(dev, camss->csiphy_num,
sizeof(*camss->csiphy), GFP_KERNEL);
if (!camss->csiphy)
return -ENOMEM;
if (!camss->csiphy) {
ret = -ENOMEM;
goto err_free;
}
camss->csid = devm_kcalloc(dev, camss->csid_num, sizeof(*camss->csid),
GFP_KERNEL);
if (!camss->csid)
return -ENOMEM;
if (!camss->csid) {
ret = -ENOMEM;
goto err_free;
}
camss->vfe = devm_kcalloc(dev, camss->vfe_num, sizeof(*camss->vfe),
GFP_KERNEL);
if (!camss->vfe)
return -ENOMEM;
if (!camss->vfe) {
ret = -ENOMEM;
goto err_free;
}
v4l2_async_notifier_init(&camss->notifier);
num_subdevs = camss_of_parse_ports(camss);
if (num_subdevs < 0)
return num_subdevs;
if (num_subdevs < 0) {
ret = num_subdevs;
goto err_cleanup;
}
ret = camss_init_subdevices(camss);
if (ret < 0)

@@ -936,6 +944,8 @@ err_register_entities:
v4l2_device_unregister(&camss->v4l2_dev);
err_cleanup:
v4l2_async_notifier_cleanup(&camss->notifier);
err_free:
kfree(camss);
return ret;
}
@@ -2084,7 +2084,8 @@ static int bond_release_and_destroy(struct net_device *bond_dev,
int ret;
ret = __bond_release_one(bond_dev, slave_dev, false, true);
if (ret == 0 && !bond_has_slaves(bond)) {
if (ret == 0 && !bond_has_slaves(bond) &&
bond_dev->reg_state != NETREG_UNREGISTERING) {
bond_dev->priv_flags |= IFF_DISABLE_NETPOLL;
netdev_info(bond_dev, "Destroying bond\n");
bond_remove_proc_entry(bond);

@@ -2824,6 +2825,9 @@ static int bond_ab_arp_inspect(struct bonding *bond)
if (bond_time_in_interval(bond, last_rx, 1)) {
bond_propose_link_state(slave, BOND_LINK_UP);
commit++;
} else if (slave->link == BOND_LINK_BACK) {
bond_propose_link_state(slave, BOND_LINK_FAIL);
commit++;
}
continue;
}

@@ -2932,6 +2936,19 @@ static void bond_ab_arp_commit(struct bonding *bond)
continue;
case BOND_LINK_FAIL:
bond_set_slave_link_state(slave, BOND_LINK_FAIL,
BOND_SLAVE_NOTIFY_NOW);
bond_set_slave_inactive_flags(slave,
BOND_SLAVE_NOTIFY_NOW);
/* A slave has just been enslaved and has become
* the current active slave.
*/
if (rtnl_dereference(bond->curr_active_slave))
RCU_INIT_POINTER(bond->current_arp_slave, NULL);
continue;
default:
slave_err(bond->dev, slave->dev,
"impossible: link_new_state %d on slave\n",

@@ -2982,8 +2999,6 @@ static bool bond_ab_arp_probe(struct bonding *bond)
return should_notify_rtnl;
}
bond_set_slave_inactive_flags(curr_arp_slave, BOND_SLAVE_NOTIFY_LATER);
bond_for_each_slave_rcu(bond, slave, iter) {
if (!found && !before && bond_slave_is_up(slave))
before = slave;
@@ -4336,13 +4351,23 @@ static netdev_tx_t bond_start_xmit(struct sk_buff *skb, struct net_device *dev)
return ret;
}
static u32 bond_mode_bcast_speed(struct slave *slave, u32 speed)
{
if (speed == 0 || speed == SPEED_UNKNOWN)
speed = slave->speed;
else
speed = min(speed, slave->speed);
return speed;
}
static int bond_ethtool_get_link_ksettings(struct net_device *bond_dev,
struct ethtool_link_ksettings *cmd)
{
struct bonding *bond = netdev_priv(bond_dev);
unsigned long speed = 0;
struct list_head *iter;
struct slave *slave;
u32 speed = 0;
cmd->base.duplex = DUPLEX_UNKNOWN;
cmd->base.port = PORT_OTHER;

@@ -4354,8 +4379,13 @@ static int bond_ethtool_get_link_ksettings(struct net_device *bond_dev,
*/
bond_for_each_slave(bond, slave, iter) {
if (bond_slave_can_tx(slave)) {
if (slave->speed != SPEED_UNKNOWN)
if (slave->speed != SPEED_UNKNOWN) {
if (BOND_MODE(bond) == BOND_MODE_BROADCAST)
speed = bond_mode_bcast_speed(slave,
speed);
else
speed += slave->speed;
}
if (cmd->base.duplex == DUPLEX_UNKNOWN &&
slave->duplex != DUPLEX_UNKNOWN)
cmd->base.duplex = slave->duplex;
@@ -1556,6 +1556,8 @@ static int b53_arl_op(struct b53_device *dev, int op, int port,
return ret;
switch (ret) {
case -ETIMEDOUT:
return ret;
case -ENOSPC:
dev_dbg(dev->dev, "{%pM,%.4d} no space left in ARL\n",
addr, vid);
@@ -2166,12 +2166,9 @@ static void ena_del_napi_in_range(struct ena_adapter *adapter,
int i;
for (i = first_index; i < first_index + count; i++) {
/* Check if napi was initialized before */
if (!ENA_IS_XDP_INDEX(adapter, i) ||
adapter->ena_napi[i].xdp_ring)
netif_napi_del(&adapter->ena_napi[i].napi);
else
WARN_ON(ENA_IS_XDP_INDEX(adapter, i) &&
WARN_ON(!ENA_IS_XDP_INDEX(adapter, i) &&
adapter->ena_napi[i].xdp_ring);
}
}

@@ -3508,16 +3505,14 @@ static void ena_fw_reset_device(struct work_struct *work)
{
struct ena_adapter *adapter =
container_of(work, struct ena_adapter, reset_task);
struct pci_dev *pdev = adapter->pdev;
if (unlikely(!test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) {
dev_err(&pdev->dev,
"device reset schedule while reset bit is off\n");
return;
}
rtnl_lock();
if (likely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) {
ena_destroy_device(adapter, false);
ena_restore_device(adapter);
}
rtnl_unlock();
}

@@ -4351,8 +4346,11 @@ static void __ena_shutoff(struct pci_dev *pdev, bool shutdown)
netdev->rx_cpu_rmap = NULL;
}
#endif /* CONFIG_RFS_ACCEL */
del_timer_sync(&adapter->timer_service);
/* Make sure timer and reset routine won't be called after
* freeing device resources.
*/
del_timer_sync(&adapter->timer_service);
cancel_work_sync(&adapter->reset_task);
rtnl_lock(); /* lock released inside the below if-else block */
@@ -2388,7 +2388,7 @@ static int gemini_ethernet_port_probe(struct platform_device *pdev)
dev_info(dev, "probe %s ID %d\n", dev_name(dev), id);
netdev = alloc_etherdev_mq(sizeof(*port), TX_QUEUE_NUM);
netdev = devm_alloc_etherdev_mqs(dev, sizeof(*port), TX_QUEUE_NUM, TX_QUEUE_NUM);
if (!netdev) {
dev_err(dev, "Can't allocate ethernet device #%d\n", id);
return -ENOMEM;

@@ -2520,7 +2520,6 @@ static int gemini_ethernet_port_probe(struct platform_device *pdev)
}
port->netdev = NULL;
free_netdev(netdev);
return ret;
}

@@ -2529,7 +2528,6 @@ static int gemini_ethernet_port_remove(struct platform_device *pdev)
struct gemini_ethernet_port *port = platform_get_drvdata(pdev);
gemini_port_remove(port);
free_netdev(port->netdev);
return 0;
}
@@ -3719,11 +3719,11 @@ failed_mii_init:
failed_irq:
failed_init:
fec_ptp_stop(pdev);
if (fep->reg_phy)
regulator_disable(fep->reg_phy);
failed_reset:
pm_runtime_put_noidle(&pdev->dev);
pm_runtime_disable(&pdev->dev);
if (fep->reg_phy)
regulator_disable(fep->reg_phy);
failed_regulator:
clk_disable_unprepare(fep->clk_ahb);
failed_clk_ahb:
@@ -1211,7 +1211,7 @@ struct i40e_aqc_set_vsi_promiscuous_modes {
#define I40E_AQC_SET_VSI_PROMISC_BROADCAST 0x04
#define I40E_AQC_SET_VSI_DEFAULT 0x08
#define I40E_AQC_SET_VSI_PROMISC_VLAN 0x10
#define I40E_AQC_SET_VSI_PROMISC_TX 0x8000
#define I40E_AQC_SET_VSI_PROMISC_RX_ONLY 0x8000
__le16 seid;
#define I40E_AQC_VSI_PROM_CMD_SEID_MASK 0x3FF
__le16 vlan_tag;
@@ -1969,6 +1969,21 @@ i40e_status i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags,
return status;
}
/**
* i40e_is_aq_api_ver_ge
* @aq: pointer to AdminQ info containing HW API version to compare
* @maj: API major value
* @min: API minor value
*
* Assert whether current HW API version is greater/equal than provided.
**/
static bool i40e_is_aq_api_ver_ge(struct i40e_adminq_info *aq, u16 maj,
u16 min)
{
return (aq->api_maj_ver > maj ||
(aq->api_maj_ver == maj && aq->api_min_ver >= min));
}
/**
* i40e_aq_add_vsi
* @hw: pointer to the hw struct
@@ -2094,18 +2109,16 @@ i40e_status i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw,
if (set) {
flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST;
if (rx_only_promisc &&
(((hw->aq.api_maj_ver == 1) && (hw->aq.api_min_ver >= 5)) ||
(hw->aq.api_maj_ver > 1)))
flags |= I40E_AQC_SET_VSI_PROMISC_TX;
if (rx_only_promisc && i40e_is_aq_api_ver_ge(&hw->aq, 1, 5))
flags |= I40E_AQC_SET_VSI_PROMISC_RX_ONLY;
}
cmd->promiscuous_flags = cpu_to_le16(flags);
cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_UNICAST);
if (((hw->aq.api_maj_ver >= 1) && (hw->aq.api_min_ver >= 5)) ||
(hw->aq.api_maj_ver > 1))
cmd->valid_flags |= cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_TX);
if (i40e_is_aq_api_ver_ge(&hw->aq, 1, 5))
cmd->valid_flags |=
cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_RX_ONLY);
cmd->seid = cpu_to_le16(seid);
status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);

@@ -2202,11 +2215,17 @@ enum i40e_status_code i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw,
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_set_vsi_promiscuous_modes);
if (enable)
if (enable) {
flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST;
if (i40e_is_aq_api_ver_ge(&hw->aq, 1, 5))
flags |= I40E_AQC_SET_VSI_PROMISC_RX_ONLY;
}
cmd->promiscuous_flags = cpu_to_le16(flags);
cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_UNICAST);
if (i40e_is_aq_api_ver_ge(&hw->aq, 1, 5))
cmd->valid_flags |=
cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_RX_ONLY);
cmd->seid = cpu_to_le16(seid);
cmd->vlan_tag = cpu_to_le16(vid | I40E_AQC_SET_VSI_VLAN_VALID);
@@ -15344,6 +15344,9 @@ static void i40e_remove(struct pci_dev *pdev)
i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), 0);
i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), 0);
while (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
usleep_range(1000, 2000);
/* no more scheduling of any task */
set_bit(__I40E_SUSPENDED, pf->state);
set_bit(__I40E_DOWN, pf->state);
@@ -4801,6 +4801,8 @@ static int igc_probe(struct pci_dev *pdev,
device_set_wakeup_enable(&adapter->pdev->dev,
adapter->flags & IGC_FLAG_WOL_SUPPORTED);
igc_ptp_init(adapter);
/* reset the hardware with the new settings */
igc_reset(adapter);

@@ -4817,9 +4819,6 @@ static int igc_probe(struct pci_dev *pdev,
/* carrier off reporting is important to ethtool even BEFORE open */
netif_carrier_off(netdev);
/* do hw tstamp init after resetting */
igc_ptp_init(adapter);
/* Check if Media Autosense is enabled */
adapter->ei = *ei;
@@ -620,8 +620,6 @@ void igc_ptp_init(struct igc_adapter *adapter)
adapter->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
adapter->tstamp_config.tx_type = HWTSTAMP_TX_OFF;
igc_ptp_reset(adapter);
adapter->ptp_clock = ptp_clock_register(&adapter->ptp_caps,
&adapter->pdev->dev);
if (IS_ERR(adapter->ptp_clock)) {
@@ -502,7 +502,7 @@ static int netvsc_vf_xmit(struct net_device *net, struct net_device *vf_netdev,
int rc;
skb->dev = vf_netdev;
skb->queue_mapping = qdisc_skb_cb(skb)->slave_dev_queue_mapping;
skb_record_rx_queue(skb, qdisc_skb_cb(skb)->slave_dev_queue_mapping);
rc = dev_queue_xmit(skb);
if (likely(rc == NET_XMIT_SUCCESS || rc == NET_XMIT_CN)) {
@@ -106,12 +106,21 @@ static void ipvlan_port_destroy(struct net_device *dev)
kfree(port);
}
#define IPVLAN_ALWAYS_ON_OFLOADS \
(NETIF_F_SG | NETIF_F_HW_CSUM | \
NETIF_F_GSO_ROBUST | NETIF_F_GSO_SOFTWARE | NETIF_F_GSO_ENCAP_ALL)
#define IPVLAN_ALWAYS_ON \
(IPVLAN_ALWAYS_ON_OFLOADS | NETIF_F_LLTX | NETIF_F_VLAN_CHALLENGED)
#define IPVLAN_FEATURES \
(NETIF_F_SG | NETIF_F_CSUM_MASK | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | \
(NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | \
NETIF_F_GSO | NETIF_F_ALL_TSO | NETIF_F_GSO_ROBUST | \
NETIF_F_GRO | NETIF_F_RXCSUM | \
NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_STAG_FILTER)
/* NETIF_F_GSO_ENCAP_ALL NETIF_F_GSO_SOFTWARE Newly added */
#define IPVLAN_STATE_MASK \
((1<<__LINK_STATE_NOCARRIER) | (1<<__LINK_STATE_DORMANT))

@@ -125,7 +134,9 @@ static int ipvlan_init(struct net_device *dev)
dev->state = (dev->state & ~IPVLAN_STATE_MASK) |
(phy_dev->state & IPVLAN_STATE_MASK);
dev->features = phy_dev->features & IPVLAN_FEATURES;
dev->features |= NETIF_F_LLTX | NETIF_F_VLAN_CHALLENGED;
dev->features |= IPVLAN_ALWAYS_ON;
dev->vlan_features = phy_dev->vlan_features & IPVLAN_FEATURES;
dev->vlan_features |= IPVLAN_ALWAYS_ON_OFLOADS;
dev->hw_enc_features |= dev->features;
dev->gso_max_size = phy_dev->gso_max_size;
dev->gso_max_segs = phy_dev->gso_max_segs;
@@ -225,7 +236,14 @@ static netdev_features_t ipvlan_fix_features(struct net_device *dev,
{
struct ipvl_dev *ipvlan = netdev_priv(dev);
return features & (ipvlan->sfeatures | ~IPVLAN_FEATURES);
features |= NETIF_F_ALL_FOR_ALL;
features &= (ipvlan->sfeatures | ~IPVLAN_FEATURES);
features = netdev_increment_features(ipvlan->phy_dev->features,
features, features);
features |= IPVLAN_ALWAYS_ON;
features &= (IPVLAN_FEATURES | IPVLAN_ALWAYS_ON);
return features;
}
static void ipvlan_change_rx_flags(struct net_device *dev, int change)

@@ -732,10 +750,9 @@ static int ipvlan_device_event(struct notifier_block *unused,
case NETDEV_FEAT_CHANGE:
list_for_each_entry(ipvlan, &port->ipvlans, pnode) {
ipvlan->dev->features = dev->features & IPVLAN_FEATURES;
ipvlan->dev->gso_max_size = dev->gso_max_size;
ipvlan->dev->gso_max_segs = dev->gso_max_segs;
netdev_features_change(ipvlan->dev);
netdev_update_features(ipvlan->dev);
}
break;
@@ -980,6 +980,11 @@ int of_dma_get_range(struct device_node *np, u64 *dma_addr, u64 *paddr, u64 *siz
/* Don't error out as we'd break some existing DTs */
continue;
}
if (range.cpu_addr == OF_BAD_ADDR) {
pr_err("translation of DMA address(%llx) to CPU address failed node(%pOF)\n",
range.bus_addr, node);
continue;
}
dma_offset = range.cpu_addr - range.bus_addr;
/* Take lower and upper limits */
@@ -817,15 +817,23 @@ int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
}
if (unlikely(!target_freq)) {
if (opp_table->required_opp_tables) {
ret = _set_required_opps(dev, opp_table, NULL);
} else if (!_get_opp_count(opp_table)) {
return 0;
} else {
dev_err(dev, "target frequency can't be 0\n");
ret = -EINVAL;
/*
* Some drivers need to support cases where some platforms may
* have OPP table for the device, while others don't and
* opp_set_rate() just needs to behave like clk_set_rate().
*/
if (!_get_opp_count(opp_table)) {
ret = 0;
goto put_opp_table;
}
if (!opp_table->required_opp_tables) {
dev_err(dev, "target frequency can't be 0\n");
ret = -EINVAL;
goto put_opp_table;
}
ret = _set_required_opps(dev, opp_table, NULL);
goto put_opp_table;
}

@@ -845,11 +853,13 @@ int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
/* Return early if nothing to do */
if (old_freq == freq) {
if (!opp_table->required_opp_tables && !opp_table->regulators) {
dev_dbg(dev, "%s: old/new frequencies (%lu Hz) are same, nothing to do\n",
__func__, freq);
ret = 0;
goto put_opp_table;
}
}
/*
* For IO devices which require an OPP on some platforms/SoCs
@@ -73,6 +73,7 @@ static int goldfish_rtc_set_alarm(struct device *dev,
rtc_alarm64 = rtc_tm_to_time64(&alrm->time) * NSEC_PER_SEC;
writel((rtc_alarm64 >> 32), base + TIMER_ALARM_HIGH);
writel(rtc_alarm64, base + TIMER_ALARM_LOW);
writel(1, base + TIMER_IRQ_ENABLED);
} else {
/*
* if this function was called with enabled=0
@@ -430,7 +430,7 @@ static void zfcp_fsf_req_complete(struct zfcp_fsf_req *req)
return;
}
del_timer(&req->timer);
del_timer_sync(&req->timer);
zfcp_fsf_protstatus_eval(req);
zfcp_fsf_fsfstatus_eval(req);
req->handler(req);

@@ -905,7 +905,7 @@ static int zfcp_fsf_req_send(struct zfcp_fsf_req *req)
req->qdio_req.qdio_outb_usage = atomic_read(&qdio->req_q_free);
req->issued = get_tod_clock();
if (zfcp_qdio_send(qdio, &req->qdio_req)) {
del_timer(&req->timer);
del_timer_sync(&req->timer);
/* lookup request again, list might have changed */
zfcp_reqlist_find_rm(adapter->req_list, req_id);
zfcp_erp_adapter_reopen(adapter, 0, "fsrs__1");
@@ -581,8 +581,12 @@ static void fc_disc_gpn_id_resp(struct fc_seq *sp, struct fc_frame *fp,
if (PTR_ERR(fp) == -FC_EX_CLOSED)
goto out;
if (IS_ERR(fp))
goto redisc;
if (IS_ERR(fp)) {
mutex_lock(&disc->disc_mutex);
fc_disc_restart(disc);
mutex_unlock(&disc->disc_mutex);
goto out;
}
cp = fc_frame_payload_get(fp, sizeof(*cp));
if (!cp)

@@ -609,7 +613,7 @@ static void fc_disc_gpn_id_resp(struct fc_seq *sp, struct fc_frame *fp,
new_rdata->disc_id = disc->disc_id;
fc_rport_login(new_rdata);
}
goto out;
goto free_fp;
}
rdata->disc_id = disc->disc_id;
mutex_unlock(&rdata->rp_mutex);

@@ -626,6 +630,8 @@ redisc:
fc_disc_restart(disc);
mutex_unlock(&disc->disc_mutex);
}
free_fp:
fc_frame_free(fp);
out:
kref_put(&rdata->kref, fc_rport_destroy);
if (!IS_ERR(fp))
@@ -2829,10 +2829,6 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
/* This may fail but that's ok */
pci_enable_pcie_error_reporting(pdev);
/* Turn off T10-DIF when FC-NVMe is enabled */
if (ql2xnvmeenable)
ql2xenabledif = 0;
ha = kzalloc(sizeof(struct qla_hw_data), GFP_KERNEL);
if (!ha) {
ql_log_pci(ql_log_fatal, pdev, 0x0009,
@@ -38,6 +38,7 @@ static int ti_j721e_ufs_probe(struct platform_device *pdev)
/* Select MPHY refclk frequency */
clk = devm_clk_get(dev, NULL);
if (IS_ERR(clk)) {
ret = PTR_ERR(clk);
dev_err(dev, "Cannot claim MPHY clock.\n");
goto clk_err;
}
@@ -12,6 +12,7 @@
#define UFS_ANY_VENDOR 0xFFFF
#define UFS_ANY_MODEL "ANY_MODEL"
#define UFS_VENDOR_MICRON 0x12C
#define UFS_VENDOR_TOSHIBA 0x198
#define UFS_VENDOR_SAMSUNG 0x1CE
#define UFS_VENDOR_SKHYNIX 0x1AD
@@ -67,11 +67,23 @@ static int ufs_intel_link_startup_notify(struct ufs_hba *hba,
return err;
}
static int ufs_intel_ehl_init(struct ufs_hba *hba)
{
hba->quirks |= UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8;
return 0;
}
static struct ufs_hba_variant_ops ufs_intel_cnl_hba_vops = {
.name = "intel-pci",
.link_startup_notify = ufs_intel_link_startup_notify,
};
static struct ufs_hba_variant_ops ufs_intel_ehl_hba_vops = {
.name = "intel-pci",
.init = ufs_intel_ehl_init,
.link_startup_notify = ufs_intel_link_startup_notify,
};
#ifdef CONFIG_PM_SLEEP
/**
* ufshcd_pci_suspend - suspend power management function

@@ -200,8 +212,8 @@ static const struct dev_pm_ops ufshcd_pci_pm_ops = {
static const struct pci_device_id ufshcd_pci_tbl[] = {
{ PCI_VENDOR_ID_SAMSUNG, 0xC00C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ PCI_VDEVICE(INTEL, 0x9DFA), (kernel_ulong_t)&ufs_intel_cnl_hba_vops },
{ PCI_VDEVICE(INTEL, 0x4B41), (kernel_ulong_t)&ufs_intel_cnl_hba_vops },
{ PCI_VDEVICE(INTEL, 0x4B43), (kernel_ulong_t)&ufs_intel_cnl_hba_vops },
{ PCI_VDEVICE(INTEL, 0x4B41), (kernel_ulong_t)&ufs_intel_ehl_hba_vops },
{ PCI_VDEVICE(INTEL, 0x4B43), (kernel_ulong_t)&ufs_intel_ehl_hba_vops },
{ } /* terminate list */
};
@@ -211,6 +211,8 @@ ufs_get_desired_pm_lvl_for_dev_link_state(enum ufs_dev_pwr_mode dev_state,
static struct ufs_dev_fix ufs_fixups[] = {
/* UFS cards deviations table */
UFS_FIX(UFS_VENDOR_MICRON, UFS_ANY_MODEL,
UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),
UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),
UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,

@@ -645,7 +647,11 @@ static inline int ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp)
*/
static inline void ufshcd_utrl_clear(struct ufs_hba *hba, u32 pos)
{
ufshcd_writel(hba, ~(1 << pos), REG_UTP_TRANSFER_REQ_LIST_CLEAR);
if (hba->quirks & UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR)
ufshcd_writel(hba, (1 << pos), REG_UTP_TRANSFER_REQ_LIST_CLEAR);
else
ufshcd_writel(hba, ~(1 << pos),
REG_UTP_TRANSFER_REQ_LIST_CLEAR);
}
/**

@@ -655,6 +661,9 @@ static inline void ufshcd_utrl_clear(struct ufs_hba *hba, u32 pos)
*/
static inline void ufshcd_utmrl_clear(struct ufs_hba *hba, u32 pos)
{
if (hba->quirks & UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR)
ufshcd_writel(hba, (1 << pos), REG_UTP_TASK_REQ_LIST_CLEAR);
else
ufshcd_writel(hba, ~(1 << pos), REG_UTP_TASK_REQ_LIST_CLEAR);
}
@@ -2149,8 +2158,14 @@ static int ufshcd_map_sg(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
return sg_segments;
if (sg_segments) {
if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN)
lrbp->utr_descriptor_ptr->prd_table_length =
cpu_to_le16((u16)sg_segments);
cpu_to_le16((sg_segments *
sizeof(struct ufshcd_sg_entry)));
else
lrbp->utr_descriptor_ptr->prd_table_length =
cpu_to_le16((u16) (sg_segments));
prd_table = (struct ufshcd_sg_entry *)lrbp->ucd_prdt_ptr;

@@ -3496,11 +3511,21 @@ static void ufshcd_host_memory_configure(struct ufs_hba *hba)
cpu_to_le32(upper_32_bits(cmd_desc_element_addr));
/* Response upiu and prdt offset should be in double words */
if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN) {
utrdlp[i].response_upiu_offset =
cpu_to_le16(response_offset);
utrdlp[i].prd_table_offset =
cpu_to_le16(prdt_offset);
utrdlp[i].response_upiu_length =
cpu_to_le16(ALIGNED_UPIU_SIZE);
} else {
utrdlp[i].response_upiu_offset =
cpu_to_le16(response_offset >> 2);
utrdlp[i].prd_table_offset = cpu_to_le16(prdt_offset >> 2);
utrdlp[i].prd_table_offset =
cpu_to_le16(prdt_offset >> 2);
utrdlp[i].response_upiu_length =
cpu_to_le16(ALIGNED_UPIU_SIZE >> 2);
}
ufshcd_init_lrb(hba, &hba->lrb[i], i);
}
@@ -3530,6 +3555,52 @@ static int ufshcd_dme_link_startup(struct ufs_hba *hba)
"dme-link-startup: error code %d\n", ret);
return ret;
}
/**
* ufshcd_dme_reset - UIC command for DME_RESET
* @hba: per adapter instance
*
* DME_RESET command is issued in order to reset UniPro stack.
* This function now deals with cold reset.
*
* Returns 0 on success, non-zero value on failure
*/
static int ufshcd_dme_reset(struct ufs_hba *hba)
{
struct uic_command uic_cmd = {0};
int ret;
uic_cmd.command = UIC_CMD_DME_RESET;
ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
if (ret)
dev_err(hba->dev,
"dme-reset: error code %d\n", ret);
return ret;
}
/**
* ufshcd_dme_enable - UIC command for DME_ENABLE
* @hba: per adapter instance
*
* DME_ENABLE command is issued in order to enable UniPro stack.
*
* Returns 0 on success, non-zero value on failure
*/
static int ufshcd_dme_enable(struct ufs_hba *hba)
{
struct uic_command uic_cmd = {0};
int ret;
uic_cmd.command = UIC_CMD_DME_ENABLE;
ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
if (ret)
dev_err(hba->dev,
"dme-reset: error code %d\n", ret);
return ret;
}
static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba)
{
@@ -4247,7 +4318,7 @@ static inline void ufshcd_hba_stop(struct ufs_hba *hba, bool can_sleep)
}
/**
* ufshcd_hba_enable - initialize the controller
* ufshcd_hba_execute_hce - initialize the controller
* @hba: per adapter instance
*
* The controller resets itself and controller firmware initialization

@@ -4256,7 +4327,7 @@ static inline void ufshcd_hba_stop(struct ufs_hba *hba, bool can_sleep)
*
* Returns 0 on success, non-zero value on failure
*/
int ufshcd_hba_enable(struct ufs_hba *hba)
static int ufshcd_hba_execute_hce(struct ufs_hba *hba)
{
int retry;

@@ -4304,6 +4375,32 @@ int ufshcd_hba_enable(struct ufs_hba *hba)
return 0;
}
int ufshcd_hba_enable(struct ufs_hba *hba)
{
int ret;
if (hba->quirks & UFSHCI_QUIRK_BROKEN_HCE) {
ufshcd_set_link_off(hba);
ufshcd_vops_hce_enable_notify(hba, PRE_CHANGE);
/* enable UIC related interrupts */
ufshcd_enable_intr(hba, UFSHCD_UIC_MASK);
ret = ufshcd_dme_reset(hba);
if (!ret) {
ret = ufshcd_dme_enable(hba);
if (!ret)
ufshcd_vops_hce_enable_notify(hba, POST_CHANGE);
if (ret)
dev_err(hba->dev,
"Host controller enable failed with non-hce\n");
}
} else {
ret = ufshcd_hba_execute_hce(hba);
}
return ret;
}
EXPORT_SYMBOL_GPL(ufshcd_hba_enable);
static int ufshcd_disable_tx_lcc(struct ufs_hba *hba, bool peer)
@@ -4702,6 +4799,12 @@ ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
/* overall command status of utrd */
ocs = ufshcd_get_tr_ocs(lrbp);
if (hba->quirks & UFSHCD_QUIRK_BROKEN_OCS_FATAL_ERROR) {
if (be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_1) &
MASK_RSP_UPIU_RESULT)
ocs = OCS_SUCCESS;
}
switch (ocs) {
case OCS_SUCCESS:
result = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);

@@ -4880,7 +4983,8 @@ static irqreturn_t ufshcd_transfer_req_compl(struct ufs_hba *hba)
* false interrupt if device completes another request after resetting
* aggregation and before reading the DB.
*/
if (ufshcd_is_intr_aggr_allowed(hba))
if (ufshcd_is_intr_aggr_allowed(hba) &&
!(hba->quirks & UFSHCI_QUIRK_SKIP_RESET_INTR_AGGR))
ufshcd_reset_intr_aggr(hba);
tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);

@@ -5699,7 +5803,7 @@ static irqreturn_t ufshcd_intr(int irq, void *__hba)
intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
} while (intr_status && --retries);
if (retval == IRQ_NONE) {
if (enabled_intr_status && retval == IRQ_NONE) {
dev_err(hba->dev, "%s: Unhandled interrupt 0x%08x\n",
__func__, intr_status);
ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: ");
drivers/scsi/ufs/ufshcd.h

@@ -518,6 +518,41 @@ enum ufshcd_quirks {
	 * ops (get_ufs_hci_version) to get the correct version.
	 */
	UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION	= 1 << 5,

	/*
	 * Clear handling for transfer/task request list is just opposite.
	 */
	UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR	= 1 << 6,

	/*
	 * This quirk needs to be enabled if host controller doesn't allow
	 * that the interrupt aggregation timer and counter are reset by s/w.
	 */
	UFSHCI_QUIRK_SKIP_RESET_INTR_AGGR	= 1 << 7,

	/*
	 * This quirk needs to be enabled if host controller cannot be
	 * enabled via HCE register.
	 */
	UFSHCI_QUIRK_BROKEN_HCE			= 1 << 8,

	/*
	 * This quirk needs to be enabled if the host controller regards
	 * resolution of the values of PRDTO and PRDTL in UTRD as byte.
	 */
	UFSHCD_QUIRK_PRDT_BYTE_GRAN		= 1 << 9,

	/*
	 * This quirk needs to be enabled if the host controller reports
	 * OCS FATAL ERROR with device error through sense data.
	 */
	UFSHCD_QUIRK_BROKEN_OCS_FATAL_ERROR	= 1 << 10,

	/*
	 * This quirk needs to be enabled if the host controller has
	 * auto-hibernate capability but it doesn't work.
	 */
	UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8	= 1 << 11,
};

enum ufshcd_caps {

@@ -767,7 +802,8 @@ return true;
static inline bool ufshcd_is_auto_hibern8_supported(struct ufs_hba *hba)
{
	return (hba->capabilities & MASK_AUTO_HIBERN8_SUPPORT);
	return (hba->capabilities & MASK_AUTO_HIBERN8_SUPPORT) &&
		!(hba->quirks & UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8);
}

static inline bool ufshcd_is_auto_hibern8_enabled(struct ufs_hba *hba)
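These quirk bits only take effect once a host driver sets them in hba->quirks. A minimal sketch of how a platform glue driver might advertise the two new ones from its variant-ops init hook; the "acme" driver name and function are made up for illustration and are not part of this patch:

/* Sketch only: real vendor UFS glue drivers set hba->quirks from their
 * struct ufs_hba_variant_ops ->init() callback. */
static int ufs_acme_init(struct ufs_hba *hba)
{
	/* The HCE register cannot bring this controller up; fall back to
	 * the DME reset/enable path added in ufshcd_hba_enable() above. */
	hba->quirks |= UFSHCI_QUIRK_BROKEN_HCE;

	/* Auto-hibernate is advertised in the capabilities but broken. */
	hba->quirks |= UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8;

	return 0;
}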
drivers/spi/Kconfig

@@ -989,4 +989,7 @@ config SPI_SLAVE_SYSTEM_CONTROL

endif # SPI_SLAVE

config SPI_DYNAMIC
	def_bool ACPI || OF_DYNAMIC || SPI_SLAVE

endif # SPI
drivers/spi/spi-stm32.c

@@ -13,6 +13,7 @@
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/pinctrl/consumer.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <linux/spi/spi.h>

@@ -1985,6 +1986,8 @@ static int stm32_spi_remove(struct platform_device *pdev)

	pm_runtime_disable(&pdev->dev);

	pinctrl_pm_select_sleep_state(&pdev->dev);

	return 0;
}

@@ -1996,13 +1999,18 @@ static int stm32_spi_runtime_suspend(struct device *dev)

	clk_disable_unprepare(spi->clk);

	return 0;
	return pinctrl_pm_select_sleep_state(dev);
}

static int stm32_spi_runtime_resume(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct stm32_spi *spi = spi_master_get_devdata(master);
	int ret;

	ret = pinctrl_pm_select_default_state(dev);
	if (ret)
		return ret;

	return clk_prepare_enable(spi->clk);
}

@@ -2032,10 +2040,23 @@ static int stm32_spi_resume(struct device *dev)
		return ret;

	ret = spi_master_resume(master);
	if (ret)
	if (ret) {
		clk_disable_unprepare(spi->clk);
		return ret;
	}

	ret = pm_runtime_get_sync(dev);
	if (ret) {
		dev_err(dev, "Unable to power device:%d\n", ret);
		return ret;
	}

	spi->cfg->config(spi);

	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);

	return 0;
}
#endif
drivers/spi/spi.c

@@ -475,6 +475,12 @@ static LIST_HEAD(spi_controller_list);
 */
static DEFINE_MUTEX(board_lock);

/*
 * Prevents addition of devices with same chip select and
 * addition of devices below an unregistering controller.
 */
static DEFINE_MUTEX(spi_add_lock);

/**
 * spi_alloc_device - Allocate a new SPI device
 * @ctlr: Controller to which device is connected

@@ -554,7 +560,6 @@ static int spi_dev_check(struct device *dev, void *data)
 */
int spi_add_device(struct spi_device *spi)
{
	static DEFINE_MUTEX(spi_add_lock);
	struct spi_controller *ctlr = spi->controller;
	struct device *dev = ctlr->dev.parent;
	int status;

@@ -582,6 +587,13 @@ int spi_add_device(struct spi_device *spi)
		goto done;
	}

	/* Controller may unregister concurrently */
	if (IS_ENABLED(CONFIG_SPI_DYNAMIC) &&
	    !device_is_registered(&ctlr->dev)) {
		status = -ENODEV;
		goto done;
	}

	/* Descriptors take precedence */
	if (ctlr->cs_gpiods)
		spi->cs_gpiod = ctlr->cs_gpiods[spi->chip_select];

@@ -2761,6 +2773,10 @@ void spi_unregister_controller(struct spi_controller *ctlr)
	struct spi_controller *found;
	int id = ctlr->bus_num;

	/* Prevent addition of new devices, unregister existing ones */
	if (IS_ENABLED(CONFIG_SPI_DYNAMIC))
		mutex_lock(&spi_add_lock);

	device_for_each_child(&ctlr->dev, NULL, __unregister);

	/* First make sure that this controller was ever added */

@@ -2781,6 +2797,9 @@ void spi_unregister_controller(struct spi_controller *ctlr)
	if (found == ctlr)
		idr_remove(&spi_master_idr, id);
	mutex_unlock(&board_lock);

	if (IS_ENABLED(CONFIG_SPI_DYNAMIC))
		mutex_unlock(&spi_add_lock);
}
EXPORT_SYMBOL_GPL(spi_unregister_controller);
drivers/target/target_core_user.c

@@ -601,7 +601,7 @@ static inline void tcmu_flush_dcache_range(void *vaddr, size_t size)
	size = round_up(size+offset, PAGE_SIZE);

	while (size) {
		flush_dcache_page(virt_to_page(start));
		flush_dcache_page(vmalloc_to_page(start));
		start += PAGE_SIZE;
		size -= PAGE_SIZE;
	}
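This change (and the xen-swiotlb one further down) fixes the same class of bug: virt_to_page() is only valid for addresses in the kernel linear mapping, so it resolves the wrong struct page for a vmalloc()ed buffer such as the tcmu data area. A minimal kernel-side sketch of the safe pattern, with an illustrative helper name:

#include <linux/mm.h>
#include <linux/vmalloc.h>

/* Sketch: resolve a kernel virtual address to its struct page whether it
 * comes from the linear map (kmalloc) or from vmalloc space. */
static struct page *buf_to_page(void *addr)
{
	if (is_vmalloc_addr(addr))
		return vmalloc_to_page(addr);
	return virt_to_page(addr);
}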
drivers/vfio/vfio_iommu_type1.c

@@ -1149,13 +1149,16 @@ static int vfio_bus_type(struct device *dev, void *data)
static int vfio_iommu_replay(struct vfio_iommu *iommu,
			     struct vfio_domain *domain)
{
	struct vfio_domain *d;
	struct vfio_domain *d = NULL;
	struct rb_node *n;
	unsigned long limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	int ret;

	/* Arbitrarily pick the first domain in the list for lookups */
	d = list_first_entry(&iommu->domain_list, struct vfio_domain, next);
	if (!list_empty(&iommu->domain_list))
		d = list_first_entry(&iommu->domain_list,
				     struct vfio_domain, next);

	n = rb_first(&iommu->dma_list);

	for (; n; n = rb_next(n)) {

@@ -1173,6 +1176,11 @@ static int vfio_iommu_replay(struct vfio_iommu *iommu,
			phys_addr_t p;
			dma_addr_t i;

			if (WARN_ON(!d)) { /* mapped w/o a domain?! */
				ret = -EINVAL;
				goto unwind;
			}

			phys = iommu_iova_to_phys(d->domain, iova);

			if (WARN_ON(!phys)) {

@@ -1202,7 +1210,7 @@ static int vfio_iommu_replay(struct vfio_iommu *iommu,
			if (npage <= 0) {
				WARN_ON(!npage);
				ret = (int)npage;
				return ret;
				goto unwind;
			}

			phys = pfn << PAGE_SHIFT;

@@ -1211,14 +1219,67 @@ static int vfio_iommu_replay(struct vfio_iommu *iommu,

			ret = iommu_map(domain->domain, iova, phys,
					size, dma->prot | domain->prot);
			if (ret)
				return ret;
			if (ret) {
				if (!dma->iommu_mapped)
					vfio_unpin_pages_remote(dma, iova,
							phys >> PAGE_SHIFT,
							size >> PAGE_SHIFT,
							true);
				goto unwind;
			}

			iova += size;
		}
	}

	/* All dmas are now mapped, defer to second tree walk for unwind */
	for (n = rb_first(&iommu->dma_list); n; n = rb_next(n)) {
		struct vfio_dma *dma = rb_entry(n, struct vfio_dma, node);

		dma->iommu_mapped = true;
	}

	return 0;

unwind:
	for (; n; n = rb_prev(n)) {
		struct vfio_dma *dma = rb_entry(n, struct vfio_dma, node);
		dma_addr_t iova;

		if (dma->iommu_mapped) {
			iommu_unmap(domain->domain, dma->iova, dma->size);
			continue;
		}

		iova = dma->iova;
		while (iova < dma->iova + dma->size) {
			phys_addr_t phys, p;
			size_t size;
			dma_addr_t i;

			phys = iommu_iova_to_phys(domain->domain, iova);
			if (!phys) {
				iova += PAGE_SIZE;
				continue;
			}

			size = PAGE_SIZE;
			p = phys + size;
			i = iova + size;
			while (i < dma->iova + dma->size &&
			       p == iommu_iova_to_phys(domain->domain, i)) {
				size += PAGE_SIZE;
				p += PAGE_SIZE;
				i += PAGE_SIZE;
			}

			iommu_unmap(domain->domain, iova, size);
			vfio_unpin_pages_remote(dma, iova, phys >> PAGE_SHIFT,
						size >> PAGE_SHIFT, true);
		}
	}

	return ret;
}

/*
drivers/video/fbdev/efifb.c

@@ -453,7 +453,7 @@ static int efifb_probe(struct platform_device *dev)
	info->apertures->ranges[0].base = efifb_fix.smem_start;
	info->apertures->ranges[0].size = size_remap;

	if (efi_enabled(EFI_BOOT) &&
	if (efi_enabled(EFI_MEMMAP) &&
	    !efi_mem_desc_lookup(efifb_fix.smem_start, &md)) {
		if ((efifb_fix.smem_start + efifb_fix.smem_len) >
		    (md.phys_addr + (md.num_pages << EFI_PAGE_SHIFT))) {
drivers/virtio/virtio_ring.c

@@ -1960,6 +1960,9 @@ bool virtqueue_poll(struct virtqueue *_vq, unsigned last_used_idx)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (unlikely(vq->broken))
		return false;

	virtio_mb(vq->weak_barriers);
	return vq->packed_ring ? virtqueue_poll_packed(_vq, last_used_idx) :
				 virtqueue_poll_split(_vq, last_used_idx);
drivers/xen/preempt.c

@@ -27,7 +27,7 @@ EXPORT_SYMBOL_GPL(xen_in_preemptible_hcall);
asmlinkage __visible void xen_maybe_preempt_hcall(void)
{
	if (unlikely(__this_cpu_read(xen_in_preemptible_hcall)
		     && need_resched())) {
		     && need_resched() && !preempt_count())) {
		/*
		 * Clear flag as we may be rescheduled on a different
		 * cpu.
drivers/xen/swiotlb-xen.c

@@ -335,6 +335,7 @@ xen_swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
	int order = get_order(size);
	phys_addr_t phys;
	u64 dma_mask = DMA_BIT_MASK(32);
	struct page *page;

	if (hwdev && hwdev->coherent_dma_mask)
		dma_mask = hwdev->coherent_dma_mask;

@@ -346,9 +347,14 @@ xen_swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
	/* Convert the size to actually allocated. */
	size = 1UL << (order + XEN_PAGE_SHIFT);

	if (is_vmalloc_addr(vaddr))
		page = vmalloc_to_page(vaddr);
	else
		page = virt_to_page(vaddr);

	if (!WARN_ON((dev_addr + size - 1 > dma_mask) ||
		     range_straddles_page_boundary(phys, size)) &&
	    TestClearPageXenRemapped(virt_to_page(vaddr)))
	    TestClearPageXenRemapped(page))
		xen_destroy_contiguous_region(phys, order);

	xen_free_coherent_pages(hwdev, size, vaddr, (dma_addr_t)phys, attrs);
fs/afs/dynroot.c

@@ -289,6 +289,7 @@ void afs_dynroot_depopulate(struct super_block *sb)
		net->dynroot_sb = NULL;
	mutex_unlock(&net->proc_cells_lock);

	if (root) {
		inode_lock(root->d_inode);

		/* Remove all the pins for dirs created for manually added cells */

@@ -300,4 +301,5 @@ void afs_dynroot_depopulate(struct super_block *sb)
		}

		inode_unlock(root->d_inode);
	}
}
fs/ceph/mds_client.c

@@ -4337,7 +4337,6 @@ int ceph_mdsc_init(struct ceph_fs_client *fsc)
		return -ENOMEM;
	}

	fsc->mdsc = mdsc;
	init_completion(&mdsc->safe_umount_waiters);
	init_waitqueue_head(&mdsc->session_close_wq);
	INIT_LIST_HEAD(&mdsc->waiting_for_map);

@@ -4390,6 +4389,8 @@ int ceph_mdsc_init(struct ceph_fs_client *fsc)

	strscpy(mdsc->nodename, utsname()->nodename,
		sizeof(mdsc->nodename));

	fsc->mdsc = mdsc;
	return 0;
}
fs/eventpoll.c

@@ -1994,11 +1994,13 @@ static int ep_loop_check_proc(void *priv, void *cookie, int call_nests)
			 * not already there, and calling reverse_path_check()
			 * during ep_insert().
			 */
			if (list_empty(&epi->ffd.file->f_tfile_llink))
			if (list_empty(&epi->ffd.file->f_tfile_llink)) {
				get_file(epi->ffd.file);
				list_add(&epi->ffd.file->f_tfile_llink,
					 &tfile_check_list);
			}
		}
	}
	mutex_unlock(&ep->mtx);

	return error;

@@ -2040,6 +2042,7 @@ static void clear_tfile_check_list(void)
		file = list_first_entry(&tfile_check_list, struct file,
					f_tfile_llink);
		list_del_init(&file->f_tfile_llink);
		fput(file);
	}
	INIT_LIST_HEAD(&tfile_check_list);
}

@@ -2200,25 +2203,22 @@ int do_epoll_ctl(int epfd, int op, int fd, struct epoll_event *epds,
			full_check = 1;
			if (is_file_epoll(tf.file)) {
				error = -ELOOP;
				if (ep_loop_check(ep, tf.file) != 0) {
					clear_tfile_check_list();
				if (ep_loop_check(ep, tf.file) != 0)
					goto error_tgt_fput;
				}
			} else
			} else {
				get_file(tf.file);
				list_add(&tf.file->f_tfile_llink,
					 &tfile_check_list);
			error = epoll_mutex_lock(&ep->mtx, 0, nonblock);
			if (error) {
out_del:
				list_del(&tf.file->f_tfile_llink);
				goto error_tgt_fput;
			}
			error = epoll_mutex_lock(&ep->mtx, 0, nonblock);
			if (error)
				goto error_tgt_fput;
			if (is_file_epoll(tf.file)) {
				tep = tf.file->private_data;
				error = epoll_mutex_lock(&tep->mtx, 1, nonblock);
				if (error) {
					mutex_unlock(&ep->mtx);
					goto out_del;
					goto error_tgt_fput;
				}
			}
		}

@@ -2239,8 +2239,6 @@ out_del:
			error = ep_insert(ep, epds, tf.file, fd, full_check);
		} else
			error = -EEXIST;
		if (full_check)
			clear_tfile_check_list();
		break;
	case EPOLL_CTL_DEL:
		if (epi)

@@ -2263,8 +2261,10 @@ out_del:
		mutex_unlock(&ep->mtx);

error_tgt_fput:
	if (full_check)
	if (full_check) {
		clear_tfile_check_list();
		mutex_unlock(&epmutex);
	}

	fdput(tf);
error_fput:
fs/ext4/block_validity.c

@@ -24,6 +24,7 @@ struct ext4_system_zone {
	struct rb_node	node;
	ext4_fsblk_t	start_blk;
	unsigned int	count;
	u32		ino;
};

static struct kmem_cache *ext4_system_zone_cachep;

@@ -45,7 +46,8 @@ void ext4_exit_system_zone(void)
static inline int can_merge(struct ext4_system_zone *entry1,
		     struct ext4_system_zone *entry2)
{
	if ((entry1->start_blk + entry1->count) == entry2->start_blk)
	if ((entry1->start_blk + entry1->count) == entry2->start_blk &&
	    entry1->ino == entry2->ino)
		return 1;
	return 0;
}

@@ -66,9 +68,9 @@ static void release_system_zone(struct ext4_system_blocks *system_blks)
 */
static int add_system_zone(struct ext4_system_blocks *system_blks,
			   ext4_fsblk_t start_blk,
			   unsigned int count)
			   unsigned int count, u32 ino)
{
	struct ext4_system_zone *new_entry = NULL, *entry;
	struct ext4_system_zone *new_entry, *entry;
	struct rb_node **n = &system_blks->root.rb_node, *node;
	struct rb_node *parent = NULL, *new_node = NULL;

@@ -79,30 +81,21 @@ static int add_system_zone(struct ext4_system_blocks *system_blks,
			n = &(*n)->rb_left;
		else if (start_blk >= (entry->start_blk + entry->count))
			n = &(*n)->rb_right;
		else {
			if (start_blk + count > (entry->start_blk +
						 entry->count))
				entry->count = (start_blk + count -
						entry->start_blk);
			new_node = *n;
			new_entry = rb_entry(new_node, struct ext4_system_zone,
					     node);
			break;
		}
		else	/* Unexpected overlap of system zones. */
			return -EFSCORRUPTED;
	}

	if (!new_entry) {
	new_entry = kmem_cache_alloc(ext4_system_zone_cachep,
				     GFP_KERNEL);
	if (!new_entry)
		return -ENOMEM;
	new_entry->start_blk = start_blk;
	new_entry->count = count;
	new_entry->ino = ino;
	new_node = &new_entry->node;

	rb_link_node(new_node, parent, n);
	rb_insert_color(new_node, &system_blks->root);
	}

	/* Can we merge to the left? */
	node = rb_prev(new_node);

@@ -159,7 +152,7 @@ static void debug_print_tree(struct ext4_sb_info *sbi)
static int ext4_data_block_valid_rcu(struct ext4_sb_info *sbi,
				     struct ext4_system_blocks *system_blks,
				     ext4_fsblk_t start_blk,
				     unsigned int count)
				     unsigned int count, ino_t ino)
{
	struct ext4_system_zone *entry;
	struct rb_node *n;

@@ -180,7 +173,7 @@ static int ext4_data_block_valid_rcu(struct ext4_sb_info *sbi,
		else if (start_blk >= (entry->start_blk + entry->count))
			n = n->rb_right;
		else
			return 0;
			return entry->ino == ino;
	}
	return 1;
}

@@ -214,19 +207,18 @@ static int ext4_protect_reserved_inode(struct super_block *sb,
		if (n == 0) {
			i++;
		} else {
			if (!ext4_data_block_valid_rcu(sbi, system_blks,
						map.m_pblk, n)) {
				err = -EFSCORRUPTED;
				__ext4_error(sb, __func__, __LINE__, -err,
					     map.m_pblk, "blocks %llu-%llu "
					     "from inode %u overlap system zone",
			err = add_system_zone(system_blks, map.m_pblk, n, ino);
			if (err < 0) {
				if (err == -EFSCORRUPTED) {
					__ext4_error(sb, __func__, __LINE__,
						     -err, map.m_pblk,
						     "blocks %llu-%llu from inode %u overlap system zone",
						     map.m_pblk,
						     map.m_pblk + map.m_len - 1, ino);
						     map.m_pblk + map.m_len - 1,
						     ino);
				}
				break;
			}
			err = add_system_zone(system_blks, map.m_pblk, n);
			if (err < 0)
				break;
			i += n;
		}
	}

@@ -280,19 +272,19 @@ int ext4_setup_system_zone(struct super_block *sb)
		    ((i < 5) || ((i % flex_size) == 0)))
			add_system_zone(system_blks,
					ext4_group_first_block_no(sb, i),
					ext4_bg_num_gdb(sb, i) + 1);
					ext4_bg_num_gdb(sb, i) + 1, 0);
		gdp = ext4_get_group_desc(sb, i, NULL);
		ret = add_system_zone(system_blks,
				ext4_block_bitmap(sb, gdp), 1);
				ext4_block_bitmap(sb, gdp), 1, 0);
		if (ret)
			goto err;
		ret = add_system_zone(system_blks,
				ext4_inode_bitmap(sb, gdp), 1);
				ext4_inode_bitmap(sb, gdp), 1, 0);
		if (ret)
			goto err;
		ret = add_system_zone(system_blks,
				ext4_inode_table(sb, gdp),
				sbi->s_itb_per_group);
				sbi->s_itb_per_group, 0);
		if (ret)
			goto err;
	}

@@ -341,7 +333,7 @@ void ext4_release_system_zone(struct super_block *sb)
		call_rcu(&system_blks->rcu, ext4_destroy_system_zone);
}

int ext4_data_block_valid(struct ext4_sb_info *sbi, ext4_fsblk_t start_blk,
int ext4_inode_block_valid(struct inode *inode, ext4_fsblk_t start_blk,
			  unsigned int count)
{
	struct ext4_system_blocks *system_blks;

@@ -353,9 +345,9 @@ int ext4_data_block_valid(struct ext4_sb_info *sbi, ext4_fsblk_t start_blk,
	 * mount option.
	 */
	rcu_read_lock();
	system_blks = rcu_dereference(sbi->system_blks);
	ret = ext4_data_block_valid_rcu(sbi, system_blks, start_blk,
					count);
	system_blks = rcu_dereference(EXT4_SB(inode->i_sb)->system_blks);
	ret = ext4_data_block_valid_rcu(EXT4_SB(inode->i_sb), system_blks,
					start_blk, count, inode->i_ino);
	rcu_read_unlock();
	return ret;
}

@@ -374,8 +366,7 @@ int ext4_check_blockref(const char *function, unsigned int line,
	while (bref < p+max) {
		blk = le32_to_cpu(*bref++);
		if (blk &&
		    unlikely(!ext4_data_block_valid(EXT4_SB(inode->i_sb),
						    blk, 1))) {
		    unlikely(!ext4_inode_block_valid(inode, blk, 1))) {
			ext4_error_inode(inode, function, line, blk,
					 "invalid block");
			return -EFSCORRUPTED;
fs/ext4/ext4.h

@@ -3338,7 +3338,7 @@ extern void ext4_release_system_zone(struct super_block *sb);
extern int ext4_setup_system_zone(struct super_block *sb);
extern int __init ext4_init_system_zone(void);
extern void ext4_exit_system_zone(void);
extern int ext4_data_block_valid(struct ext4_sb_info *sbi,
extern int ext4_inode_block_valid(struct inode *inode,
				  ext4_fsblk_t start_blk,
				  unsigned int count);
extern int ext4_check_blockref(const char *, unsigned int,
fs/ext4/extents.c

@@ -337,7 +337,7 @@ static int ext4_valid_extent(struct inode *inode, struct ext4_extent *ext)
	 */
	if (lblock + len <= lblock)
		return 0;
	return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, len);
	return ext4_inode_block_valid(inode, block, len);
}

static int ext4_valid_extent_idx(struct inode *inode,

@@ -345,7 +345,7 @@ static int ext4_valid_extent_idx(struct inode *inode,
{
	ext4_fsblk_t block = ext4_idx_pblock(ext_idx);

	return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, 1);
	return ext4_inode_block_valid(inode, block, 1);
}

static int ext4_valid_extent_entries(struct inode *inode,

@@ -500,14 +500,10 @@ __read_extent_tree_block(const char *function, unsigned int line,
	}
	if (buffer_verified(bh) && !(flags & EXT4_EX_FORCE_CACHE))
		return bh;
	if (!ext4_has_feature_journal(inode->i_sb) ||
	    (inode->i_ino !=
	     le32_to_cpu(EXT4_SB(inode->i_sb)->s_es->s_journal_inum))) {
	err = __ext4_ext_check(function, line, inode,
			       ext_block_hdr(bh), depth, pblk);
	if (err)
		goto errout;
	}
	set_buffer_verified(bh);
	/*
	 * If this is a leaf block, cache all of its entries
fs/ext4/file.c

@@ -428,6 +428,10 @@ restart:
	 */
	if (*ilock_shared && (!IS_NOSEC(inode) || *extend ||
	     !ext4_overwrite_io(inode, offset, count))) {
		if (iocb->ki_flags & IOCB_NOWAIT) {
			ret = -EAGAIN;
			goto out;
		}
		inode_unlock_shared(inode);
		*ilock_shared = false;
		inode_lock(inode);
fs/ext4/indirect.c

@@ -858,8 +858,7 @@ static int ext4_clear_blocks(handle_t *handle, struct inode *inode,
	else if (ext4_should_journal_data(inode))
		flags |= EXT4_FREE_BLOCKS_FORGET;

	if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), block_to_free,
				   count)) {
	if (!ext4_inode_block_valid(inode, block_to_free, count)) {
		EXT4_ERROR_INODE(inode, "attempt to clear invalid "
				 "blocks %llu len %lu",
				 (unsigned long long) block_to_free, count);

@@ -1004,8 +1003,7 @@ static void ext4_free_branches(handle_t *handle, struct inode *inode,
			if (!nr)
				continue;	/* A hole */

			if (!ext4_data_block_valid(EXT4_SB(inode->i_sb),
						   nr, 1)) {
			if (!ext4_inode_block_valid(inode, nr, 1)) {
				EXT4_ERROR_INODE(inode,
						 "invalid indirect mapped "
						 "block %lu (level %d)",
fs/ext4/inode.c

@@ -384,8 +384,7 @@ static int __check_block_validity(struct inode *inode, const char *func,
	    (inode->i_ino ==
	     le32_to_cpu(EXT4_SB(inode->i_sb)->s_es->s_journal_inum)))
		return 0;
	if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), map->m_pblk,
				   map->m_len)) {
	if (!ext4_inode_block_valid(inode, map->m_pblk, map->m_len)) {
		ext4_error_inode(inode, func, line, map->m_pblk,
				 "lblock %lu mapped to illegal pblock %llu "
				 "(length %d)", (unsigned long) map->m_lblk,

@@ -4747,7 +4746,7 @@ struct inode *__ext4_iget(struct super_block *sb, unsigned long ino,

	ret = 0;
	if (ei->i_file_acl &&
	    !ext4_data_block_valid(EXT4_SB(sb), ei->i_file_acl, 1)) {
	    !ext4_inode_block_valid(inode, ei->i_file_acl, 1)) {
		ext4_error_inode(inode, function, line, 0,
				 "iget: bad extended attribute block %llu",
				 ei->i_file_acl);
fs/ext4/mballoc.c

@@ -2992,7 +2992,7 @@ ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
	block = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);

	len = EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
	if (!ext4_data_block_valid(sbi, block, len)) {
	if (!ext4_inode_block_valid(ac->ac_inode, block, len)) {
		ext4_error(sb, "Allocating blocks %llu-%llu which overlap "
			   "fs metadata", block, block+len);
		/* File system mounted not to panic on error

@@ -4759,7 +4759,7 @@ void ext4_free_blocks(handle_t *handle, struct inode *inode,

	sbi = EXT4_SB(sb);
	if (!(flags & EXT4_FREE_BLOCKS_VALIDATED) &&
	    !ext4_data_block_valid(sbi, block, count)) {
	    !ext4_inode_block_valid(inode, block, count)) {
		ext4_error(sb, "Freeing blocks not in datazone - "
			   "block = %llu, count = %lu", block, count);
		goto error_return;
fs/ext4/namei.c

@@ -1396,8 +1396,8 @@ int ext4_search_dir(struct buffer_head *bh, char *search_buf, int buf_size,
		    ext4_match(dir, fname, de)) {
			/* found a match - just to be sure, do
			 * a full check */
			if (ext4_check_dir_entry(dir, NULL, de, bh, bh->b_data,
						 bh->b_size, offset))
			if (ext4_check_dir_entry(dir, NULL, de, bh, search_buf,
						 buf_size, offset))
				return -1;
			*res_dir = de;
			return 1;

@@ -1858,7 +1858,7 @@ static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir,
			     blocksize, hinfo, map);
	map -= count;
	dx_sort_map(map, count);
	/* Split the existing block in the middle, size-wise */
	/* Ensure that neither split block is over half full */
	size = 0;
	move = 0;
	for (i = count-1; i >= 0; i--) {

@@ -1868,8 +1868,18 @@ static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir,
		size += map[i].size;
		move++;
	}
	/* map index at which we will split */
	/*
	 * map index at which we will split
	 *
	 * If the sum of active entries didn't exceed half the block size, just
	 * split it in half by count; each resulting block will have at least
	 * half the space free.
	 */
	if (i > 0)
		split = count - move;
	else
		split = count/2;

	hash2 = map[split].hash;
	continued = hash2 == map[split - 1].hash;
	dxtrace(printk(KERN_INFO "Split block %lu at %x, %i/%i\n",

@@ -2472,7 +2482,7 @@ int ext4_generic_delete_entry(handle_t *handle,
	de = (struct ext4_dir_entry_2 *)entry_buf;
	while (i < buf_size - csum_size) {
		if (ext4_check_dir_entry(dir, NULL, de, bh,
					 bh->b_data, bh->b_size, i))
					 entry_buf, buf_size, i))
			return -EFSCORRUPTED;
		if (de == de_del) {
			if (pde)
fs/f2fs/compress.c

@@ -1207,6 +1207,12 @@ retry_write:
				congestion_wait(BLK_RW_ASYNC,
						DEFAULT_IO_TIMEOUT);
				lock_page(cc->rpages[i]);

				if (!PageDirty(cc->rpages[i])) {
					unlock_page(cc->rpages[i]);
					continue;
				}

				clear_page_dirty_for_io(cc->rpages[i]);
				goto retry_write;
			}
110
fs/io-wq.c

@@ -907,13 +907,15 @@ void io_wq_cancel_all(struct io_wq *wq)
struct io_cb_cancel_data {
	work_cancel_fn *fn;
	void *data;
	int nr_running;
	int nr_pending;
	bool cancel_all;
};

static bool io_wq_worker_cancel(struct io_worker *worker, void *data)
{
	struct io_cb_cancel_data *match = data;
	unsigned long flags;
	bool ret = false;

	/*
	 * Hold the lock to avoid ->cur_work going out of scope, caller

@@ -924,40 +926,68 @@ static bool io_wq_worker_cancel(struct io_worker *worker, void *data)
	    !(worker->cur_work->flags & IO_WQ_WORK_NO_CANCEL) &&
	    match->fn(worker->cur_work, match->data)) {
		send_sig(SIGINT, worker->task, 1);
		ret = true;
		match->nr_running++;
	}
	spin_unlock_irqrestore(&worker->lock, flags);

	return ret;
	return match->nr_running && !match->cancel_all;
}

static enum io_wq_cancel io_wqe_cancel_work(struct io_wqe *wqe,
static void io_wqe_cancel_pending_work(struct io_wqe *wqe,
				       struct io_cb_cancel_data *match)
{
	struct io_wq_work_node *node, *prev;
	struct io_wq_work *work;
	unsigned long flags;
	bool found = false;

retry:
	spin_lock_irqsave(&wqe->lock, flags);
	wq_list_for_each(node, prev, &wqe->work_list) {
		work = container_of(node, struct io_wq_work, list);
		if (!match->fn(work, match->data))
			continue;

		wq_list_del(&wqe->work_list, node, prev);
		spin_unlock_irqrestore(&wqe->lock, flags);
		io_run_cancel(work, wqe);
		match->nr_pending++;
		if (!match->cancel_all)
			return;

		/* not safe to continue after unlock */
		goto retry;
	}
	spin_unlock_irqrestore(&wqe->lock, flags);
}

static void io_wqe_cancel_running_work(struct io_wqe *wqe,
				       struct io_cb_cancel_data *match)
{
	rcu_read_lock();
	io_wq_for_each_worker(wqe, io_wq_worker_cancel, match);
	rcu_read_unlock();
}

enum io_wq_cancel io_wq_cancel_cb(struct io_wq *wq, work_cancel_fn *cancel,
				  void *data, bool cancel_all)
{
	struct io_cb_cancel_data match = {
		.fn		= cancel,
		.data		= data,
		.cancel_all	= cancel_all,
	};
	int node;

	/*
	 * First check pending list, if we're lucky we can just remove it
	 * from there. CANCEL_OK means that the work is returned as-new,
	 * no completion will be posted for it.
	 */
	spin_lock_irqsave(&wqe->lock, flags);
	wq_list_for_each(node, prev, &wqe->work_list) {
		work = container_of(node, struct io_wq_work, list);
	for_each_node(node) {
		struct io_wqe *wqe = wq->wqes[node];

		if (match->fn(work, match->data)) {
			wq_list_del(&wqe->work_list, node, prev);
			found = true;
			break;
		}
	}
	spin_unlock_irqrestore(&wqe->lock, flags);

	if (found) {
		io_run_cancel(work, wqe);
		io_wqe_cancel_pending_work(wqe, &match);
		if (match.nr_pending && !match.cancel_all)
			return IO_WQ_CANCEL_OK;
	}

@@ -967,31 +997,19 @@ static enum io_wq_cancel io_wqe_cancel_work(struct io_wqe *wqe,
	 * as an indication that we attempt to signal cancellation. The
	 * completion will run normally in this case.
	 */
	rcu_read_lock();
	found = io_wq_for_each_worker(wqe, io_wq_worker_cancel, match);
	rcu_read_unlock();
	return found ? IO_WQ_CANCEL_RUNNING : IO_WQ_CANCEL_NOTFOUND;
}

enum io_wq_cancel io_wq_cancel_cb(struct io_wq *wq, work_cancel_fn *cancel,
				  void *data)
{
	struct io_cb_cancel_data match = {
		.fn	= cancel,
		.data	= data,
	};
	enum io_wq_cancel ret = IO_WQ_CANCEL_NOTFOUND;
	int node;

	for_each_node(node) {
		struct io_wqe *wqe = wq->wqes[node];

		ret = io_wqe_cancel_work(wqe, &match);
		if (ret != IO_WQ_CANCEL_NOTFOUND)
			break;
		io_wqe_cancel_running_work(wqe, &match);
		if (match.nr_running && !match.cancel_all)
			return IO_WQ_CANCEL_RUNNING;
	}

	return ret;
	if (match.nr_running)
		return IO_WQ_CANCEL_RUNNING;
	if (match.nr_pending)
		return IO_WQ_CANCEL_OK;
	return IO_WQ_CANCEL_NOTFOUND;
}

static bool io_wq_io_cb_cancel_data(struct io_wq_work *work, void *data)

@@ -1001,21 +1019,7 @@ static bool io_wq_io_cb_cancel_data(struct io_wq_work *work, void *data)

enum io_wq_cancel io_wq_cancel_work(struct io_wq *wq, struct io_wq_work *cwork)
{
	return io_wq_cancel_cb(wq, io_wq_io_cb_cancel_data, (void *)cwork);
}

static bool io_wq_pid_match(struct io_wq_work *work, void *data)
{
	pid_t pid = (pid_t) (unsigned long) data;

	return work->task_pid == pid;
}

enum io_wq_cancel io_wq_cancel_pid(struct io_wq *wq, pid_t pid)
{
	void *data = (void *) (unsigned long) pid;

	return io_wq_cancel_cb(wq, io_wq_pid_match, data);
	return io_wq_cancel_cb(wq, io_wq_io_cb_cancel_data, (void *)cwork, false);
}

struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data)
fs/io-wq.h

@@ -129,12 +129,11 @@ static inline bool io_wq_is_hashed(struct io_wq_work *work)

void io_wq_cancel_all(struct io_wq *wq);
enum io_wq_cancel io_wq_cancel_work(struct io_wq *wq, struct io_wq_work *cwork);
enum io_wq_cancel io_wq_cancel_pid(struct io_wq *wq, pid_t pid);

typedef bool (work_cancel_fn)(struct io_wq_work *, void *);

enum io_wq_cancel io_wq_cancel_cb(struct io_wq *wq, work_cancel_fn *cancel,
					void *data);
					void *data, bool cancel_all);

struct task_struct *io_wq_get_task(struct io_wq *wq);
fs/io_uring.c

@@ -5023,7 +5023,7 @@ static int io_async_cancel_one(struct io_ring_ctx *ctx, void *sqe_addr)
	enum io_wq_cancel cancel_ret;
	int ret = 0;

	cancel_ret = io_wq_cancel_cb(ctx->io_wq, io_cancel_cb, sqe_addr);
	cancel_ret = io_wq_cancel_cb(ctx->io_wq, io_cancel_cb, sqe_addr, false);
	switch (cancel_ret) {
	case IO_WQ_CANCEL_OK:
		ret = 0;

@@ -7659,6 +7659,33 @@ static bool io_timeout_remove_link(struct io_ring_ctx *ctx,
	return found;
}

static bool io_cancel_link_cb(struct io_wq_work *work, void *data)
{
	return io_match_link(container_of(work, struct io_kiocb, work), data);
}

static void io_attempt_cancel(struct io_ring_ctx *ctx, struct io_kiocb *req)
{
	enum io_wq_cancel cret;

	/* cancel this particular work, if it's running */
	cret = io_wq_cancel_work(ctx->io_wq, &req->work);
	if (cret != IO_WQ_CANCEL_NOTFOUND)
		return;

	/* find links that hold this pending, cancel those */
	cret = io_wq_cancel_cb(ctx->io_wq, io_cancel_link_cb, req, true);
	if (cret != IO_WQ_CANCEL_NOTFOUND)
		return;

	/* if we have a poll link holding this pending, cancel that */
	if (io_poll_remove_link(ctx, req))
		return;

	/* final option, timeout link is holding this req pending */
	io_timeout_remove_link(ctx, req);
}

static void io_uring_cancel_files(struct io_ring_ctx *ctx,
				  struct files_struct *files)
{

@@ -7708,10 +7735,8 @@ static void io_uring_cancel_files(struct io_ring_ctx *ctx,
				continue;
			}
		} else {
			io_wq_cancel_work(ctx->io_wq, &cancel_req->work);
			/* could be a link, check and remove if it is */
			if (!io_poll_remove_link(ctx, cancel_req))
				io_timeout_remove_link(ctx, cancel_req);
			/* cancel this request, or head link requests */
			io_attempt_cancel(ctx, cancel_req);
			io_put_req(cancel_req);
		}

@@ -7720,6 +7745,13 @@ static void io_uring_cancel_files(struct io_ring_ctx *ctx,
	}
}

static bool io_cancel_pid_cb(struct io_wq_work *work, void *data)
{
	pid_t pid = (pid_t) (unsigned long) data;

	return work->task_pid == pid;
}

static int io_uring_flush(struct file *file, void *data)
{
	struct io_ring_ctx *ctx = file->private_data;

@@ -7729,8 +7761,11 @@ static int io_uring_flush(struct file *file, void *data)
	/*
	 * If the task is going away, cancel work it may have pending
	 */
	if (fatal_signal_pending(current) || (current->flags & PF_EXITING))
		io_wq_cancel_pid(ctx->io_wq, task_pid_vnr(current));
	if (fatal_signal_pending(current) || (current->flags & PF_EXITING)) {
		void *data = (void *) (unsigned long)task_pid_vnr(current);

		io_wq_cancel_cb(ctx->io_wq, io_cancel_pid_cb, data, true);
	}

	return 0;
}
fs/jbd2/journal.c

@@ -1367,8 +1367,10 @@ static int jbd2_write_superblock(journal_t *journal, int write_flags)
	int ret;

	/* Buffer got discarded which means block device got invalidated */
	if (!buffer_mapped(bh))
	if (!buffer_mapped(bh)) {
		unlock_buffer(bh);
		return -EIO;
	}

	trace_jbd2_write_superblock(journal, write_flags);
	if (!(journal->j_flags & JBD2_BARRIER))
fs/jffs2/dir.c

@@ -590,10 +590,14 @@ static int jffs2_rmdir (struct inode *dir_i, struct dentry *dentry)
	int ret;
	uint32_t now = JFFS2_NOW();

	mutex_lock(&f->sem);
	for (fd = f->dents ; fd; fd = fd->next) {
		if (fd->ino)
		if (fd->ino) {
			mutex_unlock(&f->sem);
			return -ENOTEMPTY;
		}
	}
	mutex_unlock(&f->sem);

	ret = jffs2_do_unlink(c, dir_f, dentry->d_name.name,
			      dentry->d_name.len, f, now);
fs/romfs/storage.c

@@ -217,10 +217,8 @@ int romfs_dev_read(struct super_block *sb, unsigned long pos,
	size_t limit;

	limit = romfs_maxsize(sb);
	if (pos >= limit)
	if (pos >= limit || buflen > limit - pos)
		return -EIO;
	if (buflen > limit - pos)
		buflen = limit - pos;

#ifdef CONFIG_ROMFS_ON_MTD
	if (sb->s_mtd)
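Instead of silently clamping an over-long read, romfs_dev_read() now rejects it; written this way the check also cannot wrap, since limit - pos is only evaluated after pos < limit has been established. A small stand-alone sketch of the same bounds test, with illustrative names:

#include <stdbool.h>
#include <stddef.h>

/* Sketch: reject out-of-range reads without integer overflow.  The naive
 * form "pos + buflen > limit" can wrap for very large buflen; checking
 * "buflen > limit - pos" after "pos < limit" cannot. */
static bool read_in_bounds(unsigned long pos, size_t buflen, size_t limit)
{
	return pos < limit && buflen <= limit - pos;
}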
fs/signalfd.c

@@ -314,9 +314,10 @@ SYSCALL_DEFINE4(signalfd4, int, ufd, sigset_t __user *, user_mask,
{
	sigset_t mask;

	if (sizemask != sizeof(sigset_t) ||
	    copy_from_user(&mask, user_mask, sizeof(mask)))
	if (sizemask != sizeof(sigset_t))
		return -EINVAL;
	if (copy_from_user(&mask, user_mask, sizeof(mask)))
		return -EFAULT;
	return do_signalfd4(ufd, &mask, flags);
}

@@ -325,9 +326,10 @@ SYSCALL_DEFINE3(signalfd, int, ufd, sigset_t __user *, user_mask,
{
	sigset_t mask;

	if (sizemask != sizeof(sigset_t) ||
	    copy_from_user(&mask, user_mask, sizeof(mask)))
	if (sizemask != sizeof(sigset_t))
		return -EINVAL;
	if (copy_from_user(&mask, user_mask, sizeof(mask)))
		return -EFAULT;
	return do_signalfd4(ufd, &mask, 0);
}
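With the test split in two, a wrong sizemask and an unreadable mask pointer now fail differently (EINVAL vs. EFAULT) instead of both being reported as EINVAL. A user-space sketch using the raw syscall; it assumes the kernel-side sigset_t is 8 bytes, as on x86-64:

#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	sigset_t mask;
	sigemptyset(&mask);

	/* Wrong sizemask: rejected with EINVAL before any copy_from_user(). */
	long fd = syscall(SYS_signalfd4, -1, &mask, 4, 0);
	printf("bad size:    %ld (%s)\n", fd, strerror(errno));

	/* Correct sizemask but an unreadable pointer: copy_from_user() fails, EFAULT. */
	fd = syscall(SYS_signalfd4, -1, (void *)8, 8, 0);
	printf("bad pointer: %ld (%s)\n", fd, strerror(errno));
	return 0;
}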
fs/xfs/xfs_sysfs.h

@@ -32,9 +32,11 @@ xfs_sysfs_init(
	struct xfs_kobj		*parent_kobj,
	const char		*name)
{
	struct kobject		*parent;

	parent = parent_kobj ? &parent_kobj->kobject : NULL;
	init_completion(&kobj->complete);
	return kobject_init_and_add(&kobj->kobject, ktype,
				    &parent_kobj->kobject, "%s", name);
	return kobject_init_and_add(&kobj->kobject, ktype, parent, "%s", name);
}

static inline void
Some files were not shown because too many files have changed in this diff.