This is the 4.14.172 stable release

-----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAl5ZM8sACgkQONu9yGCS
 aT61sQ//VYIHq9fdzKuD9px1gOoXdbHliFq+Xl5TtS6LMj+NaGParvLC7WKh1ANL
 2PypjozzW82XMW6xGr2QB9fiy9fiEUB6v0TM+kV58Yh3S6OMTftqMng2QjvTpvnI
 GvQ4QrgnsVXr1S9no1+UI9gM+44S/0V5Fg0RYK0dwpQOYZJ3alIvKAgk+7Kc8upq
 N6CUPojTd0XQm/Y6Xuer40KUL4FW48tVbWGXoptFR+R76VKCfcJz+iF0cKDdtjsQ
 GhpV9OkQxQFNdBgcr+GfybqEsSNWGgWKboI/ax/Rfm88H1+E6gBrJbST6JHQ3Sm8
 7k8rFpQiwr49WhKubpJCkydCIsEa1qUX0hrBsFZnr4i/r5i+QrcixIfairtBrzcp
 lNe0jYYCTP1K9uXtQy45+46/rKx4sij/9VU7UmizSD+HqzfiJ1Zo75PqJsI7i5pl
 6E0ftDXfZ/pFIlHS9Jfdj+IazsqKEGd0GQkoXE5O49Op+AhlU8znVYhpNnnW18y/
 c0rdRr2nydFdANEhnT1vpvixMdjNVl/Nbj09f8x2ngWchc1HPggMMdpg5F16SdG9
 NFBmIYmWBm5eP82p7nxwyFL3xoG8jtQ0B8e4wWFWbSobBYIMYZCB6640Gzp4MC1c
 41zLKwCVFVn60TVqqGqF8fFumMElE9FBjLMAKp4v8mpfl/o23NY=
 =BFpw
 -----END PGP SIGNATURE-----

Merge tag 'v4.14.172' into v4.14/standard/base

This is the 4.14.172 stable release

# gpg: Signature made Fri 28 Feb 2020 10:37:47 AM EST
# gpg:                using RSA key 647F28654894E3BD457199BE38DBBDC86092693E
# gpg: Can't check signature: No public key
Bruce Ashfield 2020-04-14 13:03:22 -04:00
commit 7383668954
246 changed files with 1768 additions and 8979 deletions

View File

@ -6890,7 +6890,7 @@ M: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
M: Rodrigo Vivi <rodrigo.vivi@intel.com>
L: intel-gfx@lists.freedesktop.org
W: https://01.org/linuxgraphics/
B: https://01.org/linuxgraphics/documentation/how-report-bugs
B: https://gitlab.freedesktop.org/drm/intel/-/wikis/How-to-file-i915-bugs
C: irc://chat.freenode.net/intel-gfx
Q: http://patchwork.freedesktop.org/project/intel-gfx/
T: git git://anongit.freedesktop.org/drm-intel

View File

@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 4
PATCHLEVEL = 14
SUBLEVEL = 171
SUBLEVEL = 172
EXTRAVERSION =
NAME = Petit Gorille

View File

@ -1533,12 +1533,10 @@ config THUMB2_KERNEL
bool "Compile the kernel in Thumb-2 mode" if !CPU_THUMBONLY
depends on (CPU_V7 || CPU_V7M) && !CPU_V6 && !CPU_V6K
default y if CPU_THUMBONLY
select ARM_ASM_UNIFIED
select ARM_UNWIND
help
By enabling this option, the kernel will be compiled in
Thumb-2 mode. A compiler/assembler that understand the unified
ARM-Thumb syntax is needed.
Thumb-2 mode.
If unsure, say N.
@ -1573,9 +1571,6 @@ config THUMB2_AVOID_R_ARM_THM_JUMP11
Unless you are sure your tools don't have this problem, say Y.
config ARM_ASM_UNIFIED
bool
config ARM_PATCH_IDIV
bool "Runtime patch udiv/sdiv instructions into __aeabi_{u}idiv()"
depends on CPU_32v7 && !XIP_KERNEL
@ -2010,7 +2005,7 @@ config XIP_PHYS_ADDR
config KEXEC
bool "Kexec system call (EXPERIMENTAL)"
depends on (!SMP || PM_SLEEP_SMP)
depends on !CPU_V7M
depends on MMU
select KEXEC_CORE
help
kexec is a system call that implements the ability to shutdown your

View File

@ -115,9 +115,11 @@ ifeq ($(CONFIG_ARM_UNWIND),y)
CFLAGS_ABI +=-funwind-tables
endif
# Accept old syntax despite ".syntax unified"
AFLAGS_NOWARN :=$(call as-option,-Wa$(comma)-mno-warn-deprecated,-Wa$(comma)-W)
ifeq ($(CONFIG_THUMB2_KERNEL),y)
AFLAGS_AUTOIT :=$(call as-option,-Wa$(comma)-mimplicit-it=always,-Wa$(comma)-mauto-it)
AFLAGS_NOWARN :=$(call as-option,-Wa$(comma)-mno-warn-deprecated,-Wa$(comma)-W)
CFLAGS_ISA :=-mthumb $(AFLAGS_AUTOIT) $(AFLAGS_NOWARN)
AFLAGS_ISA :=$(CFLAGS_ISA) -Wa$(comma)-mthumb
# Work around buggy relocation from gas if requested:
@ -125,7 +127,7 @@ ifeq ($(CONFIG_THUMB2_AVOID_R_ARM_THM_JUMP11),y)
CFLAGS_MODULE +=-fno-optimize-sibling-calls
endif
else
CFLAGS_ISA :=$(call cc-option,-marm,)
CFLAGS_ISA :=$(call cc-option,-marm,) $(AFLAGS_NOWARN)
AFLAGS_ISA :=$(CFLAGS_ISA)
endif

View File

@ -587,7 +587,7 @@
pinctrl-0 = <&pinctrl_usdhc2>;
bus-width = <4>;
cd-gpios = <&gpio2 2 GPIO_ACTIVE_LOW>;
wp-gpios = <&gpio2 3 GPIO_ACTIVE_HIGH>;
disable-wp;
vmmc-supply = <&reg_3p3v_sd>;
vqmmc-supply = <&reg_3p3v>;
status = "okay";
@ -598,7 +598,7 @@
pinctrl-0 = <&pinctrl_usdhc3>;
bus-width = <4>;
cd-gpios = <&gpio2 0 GPIO_ACTIVE_LOW>;
wp-gpios = <&gpio2 1 GPIO_ACTIVE_HIGH>;
disable-wp;
vmmc-supply = <&reg_3p3v_sd>;
vqmmc-supply = <&reg_3p3v>;
status = "okay";
@ -1001,7 +1001,6 @@
MX6QDL_PAD_SD2_DAT1__SD2_DATA1 0x17059
MX6QDL_PAD_SD2_DAT2__SD2_DATA2 0x17059
MX6QDL_PAD_SD2_DAT3__SD2_DATA3 0x17059
MX6QDL_PAD_NANDF_D3__GPIO2_IO03 0x40010040
MX6QDL_PAD_NANDF_D2__GPIO2_IO02 0x40010040
>;
};
@ -1014,7 +1013,6 @@
MX6QDL_PAD_SD3_DAT1__SD3_DATA1 0x17059
MX6QDL_PAD_SD3_DAT2__SD3_DATA2 0x17059
MX6QDL_PAD_SD3_DAT3__SD3_DATA3 0x17059
MX6QDL_PAD_NANDF_D1__GPIO2_IO01 0x40010040
MX6QDL_PAD_NANDF_D0__GPIO2_IO00 0x40010040
>;

View File

@ -67,6 +67,14 @@
<0xf0000100 0x100>;
};
timer@f0000200 {
compatible = "arm,cortex-a9-global-timer";
reg = <0xf0000200 0x100>;
interrupts = <GIC_PPI 11
(GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_EDGE_RISING)>;
clocks = <&cpg_clocks R8A7779_CLK_ZS>;
};
timer@f0000600 {
compatible = "arm,cortex-a9-twd-timer";
reg = <0xf0000600 0x20>;

View File

@ -20,8 +20,10 @@
#ifndef __ASM_UNIFIED_H
#define __ASM_UNIFIED_H
#if defined(__ASSEMBLY__) && defined(CONFIG_ARM_ASM_UNIFIED)
#if defined(__ASSEMBLY__)
.syntax unified
#else
__asm__(".syntax unified");
#endif
#ifdef CONFIG_CPU_V7M
@ -64,77 +66,4 @@
#endif /* CONFIG_THUMB2_KERNEL */
#ifndef CONFIG_ARM_ASM_UNIFIED
/*
* If the unified assembly syntax isn't used (in ARM mode), these
* macros expand to an empty string
*/
#ifdef __ASSEMBLY__
.macro it, cond
.endm
.macro itt, cond
.endm
.macro ite, cond
.endm
.macro ittt, cond
.endm
.macro itte, cond
.endm
.macro itet, cond
.endm
.macro itee, cond
.endm
.macro itttt, cond
.endm
.macro ittte, cond
.endm
.macro ittet, cond
.endm
.macro ittee, cond
.endm
.macro itett, cond
.endm
.macro itete, cond
.endm
.macro iteet, cond
.endm
.macro iteee, cond
.endm
#else /* !__ASSEMBLY__ */
__asm__(
" .macro it, cond\n"
" .endm\n"
" .macro itt, cond\n"
" .endm\n"
" .macro ite, cond\n"
" .endm\n"
" .macro ittt, cond\n"
" .endm\n"
" .macro itte, cond\n"
" .endm\n"
" .macro itet, cond\n"
" .endm\n"
" .macro itee, cond\n"
" .endm\n"
" .macro itttt, cond\n"
" .endm\n"
" .macro ittte, cond\n"
" .endm\n"
" .macro ittet, cond\n"
" .endm\n"
" .macro ittee, cond\n"
" .endm\n"
" .macro itett, cond\n"
" .endm\n"
" .macro itete, cond\n"
" .endm\n"
" .macro iteet, cond\n"
" .endm\n"
" .macro iteee, cond\n"
" .endm\n");
#endif /* __ASSEMBLY__ */
#endif /* CONFIG_ARM_ASM_UNIFIED */
#endif /* !__ASM_UNIFIED_H */

View File

@ -788,6 +788,8 @@
interrupts = <0 138 0>;
phys = <&hsusb_phy2>;
phy-names = "usb2-phy";
snps,dis_u2_susphy_quirk;
snps,dis_enblslpm_quirk;
};
};
@ -817,6 +819,8 @@
interrupts = <0 131 0>;
phys = <&hsusb_phy1>, <&ssusb_phy_0>;
phy-names = "usb2-phy", "usb3-phy";
snps,dis_u2_susphy_quirk;
snps,dis_enblslpm_quirk;
};
};
};

View File

@ -30,13 +30,16 @@ typedef void (*alternative_cb_t)(struct alt_instr *alt,
void __init apply_alternatives_all(void);
void apply_alternatives(void *start, size_t length);
#define ALTINSTR_ENTRY(feature,cb) \
#define ALTINSTR_ENTRY(feature) \
" .word 661b - .\n" /* label */ \
" .if " __stringify(cb) " == 0\n" \
" .word 663f - .\n" /* new instruction */ \
" .else\n" \
" .hword " __stringify(feature) "\n" /* feature bit */ \
" .byte 662b-661b\n" /* source len */ \
" .byte 664f-663f\n" /* replacement len */
#define ALTINSTR_ENTRY_CB(feature, cb) \
" .word 661b - .\n" /* label */ \
" .word " __stringify(cb) "- .\n" /* callback */ \
" .endif\n" \
" .hword " __stringify(feature) "\n" /* feature bit */ \
" .byte 662b-661b\n" /* source len */ \
" .byte 664f-663f\n" /* replacement len */
@ -57,15 +60,14 @@ void apply_alternatives(void *start, size_t length);
*
* Alternatives with callbacks do not generate replacement instructions.
*/
#define __ALTERNATIVE_CFG(oldinstr, newinstr, feature, cfg_enabled, cb) \
#define __ALTERNATIVE_CFG(oldinstr, newinstr, feature, cfg_enabled) \
".if "__stringify(cfg_enabled)" == 1\n" \
"661:\n\t" \
oldinstr "\n" \
"662:\n" \
".pushsection .altinstructions,\"a\"\n" \
ALTINSTR_ENTRY(feature,cb) \
ALTINSTR_ENTRY(feature) \
".popsection\n" \
" .if " __stringify(cb) " == 0\n" \
".pushsection .altinstr_replacement, \"a\"\n" \
"663:\n\t" \
newinstr "\n" \
@ -73,17 +75,25 @@ void apply_alternatives(void *start, size_t length);
".popsection\n\t" \
".org . - (664b-663b) + (662b-661b)\n\t" \
".org . - (662b-661b) + (664b-663b)\n" \
".else\n\t" \
".endif\n"
#define __ALTERNATIVE_CFG_CB(oldinstr, feature, cfg_enabled, cb) \
".if "__stringify(cfg_enabled)" == 1\n" \
"661:\n\t" \
oldinstr "\n" \
"662:\n" \
".pushsection .altinstructions,\"a\"\n" \
ALTINSTR_ENTRY_CB(feature, cb) \
".popsection\n" \
"663:\n\t" \
"664:\n\t" \
".endif\n" \
".endif\n"
#define _ALTERNATIVE_CFG(oldinstr, newinstr, feature, cfg, ...) \
__ALTERNATIVE_CFG(oldinstr, newinstr, feature, IS_ENABLED(cfg), 0)
__ALTERNATIVE_CFG(oldinstr, newinstr, feature, IS_ENABLED(cfg))
#define ALTERNATIVE_CB(oldinstr, cb) \
__ALTERNATIVE_CFG(oldinstr, "NOT_AN_INSTRUCTION", ARM64_CB_PATCH, 1, cb)
__ALTERNATIVE_CFG_CB(oldinstr, ARM64_CB_PATCH, 1, cb)
#else
#include <asm/assembler.h>

View File

@ -41,9 +41,7 @@ EXPORT_SYMBOL_GPL(elf_hwcap);
#define COMPAT_ELF_HWCAP_DEFAULT \
(COMPAT_HWCAP_HALF|COMPAT_HWCAP_THUMB|\
COMPAT_HWCAP_FAST_MULT|COMPAT_HWCAP_EDSP|\
COMPAT_HWCAP_TLS|COMPAT_HWCAP_VFP|\
COMPAT_HWCAP_VFPv3|COMPAT_HWCAP_VFPv4|\
COMPAT_HWCAP_NEON|COMPAT_HWCAP_IDIV|\
COMPAT_HWCAP_TLS|COMPAT_HWCAP_IDIV|\
COMPAT_HWCAP_LPAE)
unsigned int compat_elf_hwcap __read_mostly = COMPAT_ELF_HWCAP_DEFAULT;
unsigned int compat_elf_hwcap2 __read_mostly;
@ -1134,17 +1132,30 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
{},
};
#define HWCAP_CAP(reg, field, s, min_value, cap_type, cap) \
{ \
.desc = #cap, \
.type = ARM64_CPUCAP_SYSTEM_FEATURE, \
#define HWCAP_CPUID_MATCH(reg, field, s, min_value) \
.matches = has_cpuid_feature, \
.sys_reg = reg, \
.field_pos = field, \
.sign = s, \
.min_field_value = min_value, \
#define __HWCAP_CAP(name, cap_type, cap) \
.desc = name, \
.type = ARM64_CPUCAP_SYSTEM_FEATURE, \
.hwcap_type = cap_type, \
.hwcap = cap, \
#define HWCAP_CAP(reg, field, s, min_value, cap_type, cap) \
{ \
__HWCAP_CAP(#cap, cap_type, cap) \
HWCAP_CPUID_MATCH(reg, field, s, min_value) \
}
#define HWCAP_CAP_MATCH(match, cap_type, cap) \
{ \
__HWCAP_CAP(#cap, cap_type, cap) \
.matches = match, \
}
static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = {
@ -1177,8 +1188,35 @@ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = {
{},
};
#ifdef CONFIG_COMPAT
static bool compat_has_neon(const struct arm64_cpu_capabilities *cap, int scope)
{
/*
* Check that all of MVFR1_EL1.{SIMDSP, SIMDInt, SIMDLS} are available,
* in line with that of arm32 as in vfp_init(). We make sure that the
* check is future proof by making sure the value is non-zero.
*/
u32 mvfr1;
WARN_ON(scope == SCOPE_LOCAL_CPU && preemptible());
if (scope == SCOPE_SYSTEM)
mvfr1 = read_sanitised_ftr_reg(SYS_MVFR1_EL1);
else
mvfr1 = read_sysreg_s(SYS_MVFR1_EL1);
return cpuid_feature_extract_unsigned_field(mvfr1, MVFR1_SIMDSP_SHIFT) &&
cpuid_feature_extract_unsigned_field(mvfr1, MVFR1_SIMDINT_SHIFT) &&
cpuid_feature_extract_unsigned_field(mvfr1, MVFR1_SIMDLS_SHIFT);
}
#endif
static const struct arm64_cpu_capabilities compat_elf_hwcaps[] = {
#ifdef CONFIG_COMPAT
HWCAP_CAP_MATCH(compat_has_neon, CAP_COMPAT_HWCAP, COMPAT_HWCAP_NEON),
HWCAP_CAP(SYS_MVFR1_EL1, MVFR1_SIMDFMAC_SHIFT, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP, COMPAT_HWCAP_VFPv4),
/* Arm v8 mandates MVFR0.FPDP == {0, 2}. So, piggyback on this for the presence of VFP support */
HWCAP_CAP(SYS_MVFR0_EL1, MVFR0_FPDP_SHIFT, FTR_UNSIGNED, 2, CAP_COMPAT_HWCAP, COMPAT_HWCAP_VFP),
HWCAP_CAP(SYS_MVFR0_EL1, MVFR0_FPDP_SHIFT, FTR_UNSIGNED, 2, CAP_COMPAT_HWCAP, COMPAT_HWCAP_VFPv3),
HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_AES_SHIFT, FTR_UNSIGNED, 2, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_PMULL),
HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_AES_SHIFT, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_AES),
HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_SHA1_SHIFT, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_SHA1),

View File

@ -206,8 +206,19 @@ void fpsimd_preserve_current_state(void)
*/
void fpsimd_restore_current_state(void)
{
if (!system_supports_fpsimd())
/*
* For the tasks that were created before we detected the absence of
* FP/SIMD, the TIF_FOREIGN_FPSTATE could be set via fpsimd_thread_switch(),
* e.g., init. This could then be inherited by child processes.
* If we later detect that the system doesn't support FP/SIMD,
* we must clear the flag for all the tasks to indicate that the
* FPSTATE is clean (as we can't have one) to avoid looping forever in
* do_notify_resume().
*/
if (!system_supports_fpsimd()) {
clear_thread_flag(TIF_FOREIGN_FPSTATE);
return;
}
local_bh_disable();
@ -229,7 +240,7 @@ void fpsimd_restore_current_state(void)
*/
void fpsimd_update_current_state(struct fpsimd_state *state)
{
if (!system_supports_fpsimd())
if (WARN_ON(!system_supports_fpsimd()))
return;
local_bh_disable();

View File

@ -354,6 +354,13 @@ static void ssbs_thread_switch(struct task_struct *next)
if (unlikely(next->flags & PF_KTHREAD))
return;
/*
* If all CPUs implement the SSBS extension, then we just need to
* context-switch the PSTATE field.
*/
if (cpu_have_feature(cpu_feature(SSBS)))
return;
/* If the mitigation is enabled, then we leave SSBS clear. */
if ((arm64_get_ssbd_state() == ARM64_SSBD_FORCE_ENABLE) ||
test_tsk_thread_flag(next, TIF_SSBD))

View File

@ -624,6 +624,13 @@ static int gpr_set(struct task_struct *target, const struct user_regset *regset,
return 0;
}
static int fpr_active(struct task_struct *target, const struct user_regset *regset)
{
if (!system_supports_fpsimd())
return -ENODEV;
return regset->n;
}
/*
* TODO: update fp accessors for lazy context switching (sync/flush hwstate)
*/
@ -634,6 +641,9 @@ static int fpr_get(struct task_struct *target, const struct user_regset *regset,
struct user_fpsimd_state *uregs;
uregs = &target->thread.fpsimd_state.user_fpsimd;
if (!system_supports_fpsimd())
return -EINVAL;
if (target == current)
fpsimd_preserve_current_state();
@ -648,6 +658,9 @@ static int fpr_set(struct task_struct *target, const struct user_regset *regset,
struct user_fpsimd_state newstate =
target->thread.fpsimd_state.user_fpsimd;
if (!system_supports_fpsimd())
return -EINVAL;
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newstate, 0, -1);
if (ret)
return ret;
@ -740,6 +753,7 @@ static const struct user_regset aarch64_regsets[] = {
*/
.size = sizeof(u32),
.align = sizeof(u32),
.active = fpr_active,
.get = fpr_get,
.set = fpr_set
},
@ -914,6 +928,9 @@ static int compat_vfp_get(struct task_struct *target,
compat_ulong_t fpscr;
int ret, vregs_end_pos;
if (!system_supports_fpsimd())
return -EINVAL;
uregs = &target->thread.fpsimd_state.user_fpsimd;
if (target == current)
@ -947,6 +964,9 @@ static int compat_vfp_set(struct task_struct *target,
compat_ulong_t fpscr;
int ret, vregs_end_pos;
if (!system_supports_fpsimd())
return -EINVAL;
uregs = &target->thread.fpsimd_state.user_fpsimd;
vregs_end_pos = VFP_STATE_SIZE - sizeof(compat_ulong_t);
@ -1004,6 +1024,7 @@ static const struct user_regset aarch32_regsets[] = {
.n = VFP_STATE_SIZE / sizeof(compat_ulong_t),
.size = sizeof(compat_ulong_t),
.align = sizeof(compat_ulong_t),
.active = fpr_active,
.get = compat_vfp_get,
.set = compat_vfp_set
},

View File

@ -92,6 +92,7 @@ static inline void __disable_dcache_nomsr(void)
#define CACHE_LOOP_LIMITS(start, end, cache_line_length, cache_size) \
do { \
int align = ~(cache_line_length - 1); \
if (start < UINT_MAX - cache_size) \
end = min(start + cache_size, end); \
start &= align; \
} while (0)
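
The guard added above exists to keep start + cache_size from wrapping around as an unsigned value. A minimal user-space sketch of the same clamp logic, with hypothetical addresses (not the kernel macro itself):

#include <limits.h>
#include <stdio.h>

/* Clamp [start, end) to at most cache_size bytes, computing
 * start + cache_size only when it cannot wrap past UINT_MAX. */
static unsigned int clamp_end(unsigned int start, unsigned int end,
                              unsigned int cache_size)
{
	if (start < UINT_MAX - cache_size && start + cache_size < end)
		end = start + cache_size;
	return end;
}

int main(void)
{
	/* Near the top of the address space the sum would wrap,
	 * so the guard leaves end alone. */
	printf("%#x\n", clamp_end(0xffffff00u, 0xffffffffu, 0x1000u));
	/* Well below the top, end is clamped to start + cache_size. */
	printf("%#x\n", clamp_end(0x1000u, 0xffffffffu, 0x8000u));
	return 0;
}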

View File

@ -31,6 +31,9 @@ static int __init loongson3_platform_init(void)
continue;
pdev = kzalloc(sizeof(struct platform_device), GFP_KERNEL);
if (!pdev)
return -ENOMEM;
pdev->name = loongson_sysconf.sensors[i].name;
pdev->id = loongson_sysconf.sensors[i].id;
pdev->dev.platform_data = &loongson_sysconf.sensors[i];

View File

@ -520,12 +520,6 @@ static void *eeh_rmv_device(void *data, void *userdata)
pci_iov_remove_virtfn(edev->physfn, pdn->vf_index, 0);
edev->pdev = NULL;
/*
* We have to set the VF PE number to invalid one, which is
* required to plug the VF successfully.
*/
pdn->pe_number = IODA_INVALID_PE;
#endif
if (rmv_data)
list_add(&edev->rmv_list, &rmv_data->edev_list);

View File

@ -261,9 +261,22 @@ void remove_dev_pci_data(struct pci_dev *pdev)
continue;
#ifdef CONFIG_EEH
/* Release EEH device for the VF */
/*
* Release EEH state for this VF. The PCI core
* has already torn down the pci_dev for this VF, but
* we're responsible for removing the eeh_dev since it
* has the same lifetime as the pci_dn that spawned it.
*/
edev = pdn_to_eeh_dev(pdn);
if (edev) {
/*
* We allocate pci_dn's for the totalvfs count,
* but only the VFs that were activated
* have a configured PE.
*/
if (edev->pe)
eeh_rmv_from_parent_pe(edev);
pdn->edev = NULL;
kfree(edev);
}

View File

@ -1523,6 +1523,10 @@ static void pnv_ioda_setup_vf_PE(struct pci_dev *pdev, u16 num_vfs)
/* Reserve PE for each VF */
for (vf_index = 0; vf_index < num_vfs; vf_index++) {
int vf_devfn = pci_iov_virtfn_devfn(pdev, vf_index);
int vf_bus = pci_iov_virtfn_bus(pdev, vf_index);
struct pci_dn *vf_pdn;
if (pdn->m64_single_mode)
pe_num = pdn->pe_num_map[vf_index];
else
@ -1535,13 +1539,11 @@ static void pnv_ioda_setup_vf_PE(struct pci_dev *pdev, u16 num_vfs)
pe->pbus = NULL;
pe->parent_dev = pdev;
pe->mve_number = -1;
pe->rid = (pci_iov_virtfn_bus(pdev, vf_index) << 8) |
pci_iov_virtfn_devfn(pdev, vf_index);
pe->rid = (vf_bus << 8) | vf_devfn;
pe_info(pe, "VF %04d:%02d:%02d.%d associated with PE#%x\n",
hose->global_number, pdev->bus->number,
PCI_SLOT(pci_iov_virtfn_devfn(pdev, vf_index)),
PCI_FUNC(pci_iov_virtfn_devfn(pdev, vf_index)), pe_num);
PCI_SLOT(vf_devfn), PCI_FUNC(vf_devfn), pe_num);
if (pnv_ioda_configure_pe(phb, pe)) {
/* XXX What do we do here ? */
@ -1555,6 +1557,15 @@ static void pnv_ioda_setup_vf_PE(struct pci_dev *pdev, u16 num_vfs)
list_add_tail(&pe->list, &phb->ioda.pe_list);
mutex_unlock(&phb->ioda.pe_list_mutex);
/* associate this PE with its pdn */
list_for_each_entry(vf_pdn, &pdn->parent->child_list, list) {
if (vf_pdn->busno == vf_bus &&
vf_pdn->devfn == vf_devfn) {
vf_pdn->pe_number = pe_num;
break;
}
}
pnv_pci_ioda2_setup_dma_pe(phb, pe);
}
}

View File

@ -978,16 +978,12 @@ void pnv_pci_dma_dev_setup(struct pci_dev *pdev)
struct pnv_phb *phb = hose->private_data;
#ifdef CONFIG_PCI_IOV
struct pnv_ioda_pe *pe;
struct pci_dn *pdn;
/* Fix the VF pdn PE number */
if (pdev->is_virtfn) {
pdn = pci_get_pdn(pdev);
WARN_ON(pdn->pe_number != IODA_INVALID_PE);
list_for_each_entry(pe, &phb->ioda.pe_list, list) {
if (pe->rid == ((pdev->bus->number << 8) |
(pdev->devfn & 0xff))) {
pdn->pe_number = pe->pe_number;
pe->pdev = pdev;
break;
}

View File

@ -42,7 +42,7 @@ void __storage_key_init_range(unsigned long start, unsigned long end);
static inline void storage_key_init_range(unsigned long start, unsigned long end)
{
if (PAGE_DEFAULT_KEY)
if (PAGE_DEFAULT_KEY != 0)
__storage_key_init_range(start, end);
}

View File

@ -155,7 +155,7 @@ static inline void get_tod_clock_ext(char *clk)
static inline unsigned long long get_tod_clock(void)
{
unsigned char clk[STORE_CLOCK_EXT_SIZE];
char clk[STORE_CLOCK_EXT_SIZE];
get_tod_clock_ext(clk);
return *((unsigned long long *)&clk[1]);
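
The return statement above reads an unsigned long long from byte offset 1 of the buffer via a pointer cast. A hedged user-space illustration of extracting an integer from an arbitrary byte offset; memcpy is shown as the alignment-safe alternative, and the buffer contents are made up:

#include <stdio.h>
#include <string.h>

int main(void)
{
	char clk[16] = {0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x08};
	unsigned long long tod;

	/* memcpy reads the same 8 bytes as
	 * *(unsigned long long *)&clk[1] but avoids the unaligned
	 * access and strict-aliasing pitfalls of the direct cast. */
	memcpy(&tod, &clk[1], sizeof(tod));
	printf("%#llx\n", tod);
	return 0;
}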

View File

@ -25,6 +25,12 @@ ENTRY(ftrace_stub)
#define STACK_PTREGS (STACK_FRAME_OVERHEAD)
#define STACK_PTREGS_GPRS (STACK_PTREGS + __PT_GPRS)
#define STACK_PTREGS_PSW (STACK_PTREGS + __PT_PSW)
#ifdef __PACK_STACK
/* allocate just enough for r14, r15 and backchain */
#define TRACED_FUNC_FRAME_SIZE 24
#else
#define TRACED_FUNC_FRAME_SIZE STACK_FRAME_OVERHEAD
#endif
ENTRY(_mcount)
BR_EX %r14
@ -38,9 +44,16 @@ ENTRY(ftrace_caller)
#ifndef CC_USING_HOTPATCH
aghi %r0,MCOUNT_RETURN_FIXUP
#endif
aghi %r15,-STACK_FRAME_SIZE
# allocate stack frame for ftrace_caller to contain traced function
aghi %r15,-TRACED_FUNC_FRAME_SIZE
stg %r1,__SF_BACKCHAIN(%r15)
stg %r0,(__SF_GPRS+8*8)(%r15)
stg %r15,(__SF_GPRS+9*8)(%r15)
# allocate pt_regs and stack frame for ftrace_trace_function
aghi %r15,-STACK_FRAME_SIZE
stg %r1,(STACK_PTREGS_GPRS+15*8)(%r15)
aghi %r1,-TRACED_FUNC_FRAME_SIZE
stg %r1,__SF_BACKCHAIN(%r15)
stg %r0,(STACK_PTREGS_PSW+8)(%r15)
stmg %r2,%r14,(STACK_PTREGS_GPRS+2*8)(%r15)
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES

View File

@ -1913,7 +1913,7 @@ static int flic_ais_mode_get_all(struct kvm *kvm, struct kvm_device_attr *attr)
return -EINVAL;
if (!test_kvm_facility(kvm, 72))
return -ENOTSUPP;
return -EOPNOTSUPP;
mutex_lock(&fi->ais_lock);
ais.simm = fi->simm;
@ -2214,7 +2214,7 @@ static int modify_ais_mode(struct kvm *kvm, struct kvm_device_attr *attr)
int ret = 0;
if (!test_kvm_facility(kvm, 72))
return -ENOTSUPP;
return -EOPNOTSUPP;
if (copy_from_user(&req, (void __user *)attr->addr, sizeof(req)))
return -EFAULT;
@ -2294,7 +2294,7 @@ static int flic_ais_mode_set_all(struct kvm *kvm, struct kvm_device_attr *attr)
struct kvm_s390_ais_all ais;
if (!test_kvm_facility(kvm, 72))
return -ENOTSUPP;
return -EOPNOTSUPP;
if (copy_from_user(&ais, (void __user *)attr->addr, sizeof(ais)))
return -EFAULT;

View File

@ -78,8 +78,15 @@ enum {
GPIO_FN_WDTOVF,
/* CAN */
GPIO_FN_CTX1, GPIO_FN_CRX1, GPIO_FN_CTX0, GPIO_FN_CTX0_CTX1,
GPIO_FN_CRX0, GPIO_FN_CRX0_CRX1, GPIO_FN_CRX0_CRX1_CRX2,
GPIO_FN_CTX2, GPIO_FN_CRX2,
GPIO_FN_CTX1, GPIO_FN_CRX1,
GPIO_FN_CTX0, GPIO_FN_CRX0,
GPIO_FN_CTX0_CTX1, GPIO_FN_CRX0_CRX1,
GPIO_FN_CTX0_CTX1_CTX2, GPIO_FN_CRX0_CRX1_CRX2,
GPIO_FN_CTX2_PJ21, GPIO_FN_CRX2_PJ20,
GPIO_FN_CTX1_PJ23, GPIO_FN_CRX1_PJ22,
GPIO_FN_CTX0_CTX1_PJ23, GPIO_FN_CRX0_CRX1_PJ22,
GPIO_FN_CTX0_CTX1_CTX2_PJ21, GPIO_FN_CRX0_CRX1_CRX2_PJ20,
/* DMAC */
GPIO_FN_TEND0, GPIO_FN_DACK0, GPIO_FN_DREQ0,

View File

@ -167,12 +167,14 @@ SECTIONS
}
PERCPU_SECTION(SMP_CACHE_BYTES)
#ifdef CONFIG_JUMP_LABEL
. = ALIGN(PAGE_SIZE);
.exit.text : {
EXIT_TEXT
}
#endif
.exit.data : {
EXIT_DATA
}
. = ALIGN(PAGE_SIZE);
__init_end = .;

View File

@ -11,6 +11,7 @@
#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/mm_types.h>
#include <linux/elf.h>
#include <asm/processor.h>
#include <asm/vdso.h>

View File

@ -245,6 +245,7 @@ static const u64 amd_f17h_perfmon_event_map[PERF_COUNT_HW_MAX] =
[PERF_COUNT_HW_CPU_CYCLES] = 0x0076,
[PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0,
[PERF_COUNT_HW_CACHE_REFERENCES] = 0xff60,
[PERF_COUNT_HW_CACHE_MISSES] = 0x0964,
[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c2,
[PERF_COUNT_HW_BRANCH_MISSES] = 0x00c3,
[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x0287,

View File

@ -1368,6 +1368,8 @@ intel_pmu_save_and_restart_reload(struct perf_event *event, int count)
old = ((s64)(prev_raw_count << shift) >> shift);
local64_add(new - old + count * period, &event->count);
local64_set(&hwc->period_left, -new);
perf_event_update_userpage(event);
return 0;

View File

@ -1006,7 +1006,7 @@ struct kvm_x86_ops {
void (*load_eoi_exitmap)(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap);
void (*set_virtual_apic_mode)(struct kvm_vcpu *vcpu);
void (*set_apic_access_page_addr)(struct kvm_vcpu *vcpu, hpa_t hpa);
void (*deliver_posted_interrupt)(struct kvm_vcpu *vcpu, int vector);
int (*deliver_posted_interrupt)(struct kvm_vcpu *vcpu, int vector);
int (*sync_pir_to_irr)(struct kvm_vcpu *vcpu);
int (*set_tss_addr)(struct kvm *kvm, unsigned int addr);
int (*get_tdp_level)(struct kvm_vcpu *vcpu);

View File

@ -1116,9 +1116,12 @@ static const struct sysfs_ops threshold_ops = {
.store = store,
};
static void threshold_block_release(struct kobject *kobj);
static struct kobj_type threshold_ktype = {
.sysfs_ops = &threshold_ops,
.default_attrs = default_attrs,
.release = threshold_block_release,
};
static const char *get_name(unsigned int bank, struct threshold_block *b)
@ -1151,8 +1154,9 @@ static const char *get_name(unsigned int bank, struct threshold_block *b)
return buf_mcatype;
}
static int allocate_threshold_blocks(unsigned int cpu, unsigned int bank,
unsigned int block, u32 address)
static int allocate_threshold_blocks(unsigned int cpu, struct threshold_bank *tb,
unsigned int bank, unsigned int block,
u32 address)
{
struct threshold_block *b = NULL;
u32 low, high;
@ -1196,16 +1200,12 @@ static int allocate_threshold_blocks(unsigned int cpu, unsigned int bank,
INIT_LIST_HEAD(&b->miscj);
if (per_cpu(threshold_banks, cpu)[bank]->blocks) {
list_add(&b->miscj,
&per_cpu(threshold_banks, cpu)[bank]->blocks->miscj);
} else {
per_cpu(threshold_banks, cpu)[bank]->blocks = b;
}
if (tb->blocks)
list_add(&b->miscj, &tb->blocks->miscj);
else
tb->blocks = b;
err = kobject_init_and_add(&b->kobj, &threshold_ktype,
per_cpu(threshold_banks, cpu)[bank]->kobj,
get_name(bank, b));
err = kobject_init_and_add(&b->kobj, &threshold_ktype, tb->kobj, get_name(bank, b));
if (err)
goto out_free;
recurse:
@ -1213,7 +1213,7 @@ recurse:
if (!address)
return 0;
err = allocate_threshold_blocks(cpu, bank, block, address);
err = allocate_threshold_blocks(cpu, tb, bank, block, address);
if (err)
goto out_free;
@ -1298,8 +1298,6 @@ static int threshold_create_bank(unsigned int cpu, unsigned int bank)
goto out_free;
}
per_cpu(threshold_banks, cpu)[bank] = b;
if (is_shared_bank(bank)) {
refcount_set(&b->cpus, 1);
@ -1310,9 +1308,13 @@ static int threshold_create_bank(unsigned int cpu, unsigned int bank)
}
}
err = allocate_threshold_blocks(cpu, bank, 0, msr_ops.misc(bank));
if (!err)
goto out;
err = allocate_threshold_blocks(cpu, b, bank, 0, msr_ops.misc(bank));
if (err)
goto out_free;
per_cpu(threshold_banks, cpu)[bank] = b;
return 0;
out_free:
kfree(b);
@ -1321,8 +1323,12 @@ static int threshold_create_bank(unsigned int cpu, unsigned int bank)
return err;
}
static void deallocate_threshold_block(unsigned int cpu,
unsigned int bank)
static void threshold_block_release(struct kobject *kobj)
{
kfree(to_block(kobj));
}
static void deallocate_threshold_block(unsigned int cpu, unsigned int bank)
{
struct threshold_block *pos = NULL;
struct threshold_block *tmp = NULL;
@ -1332,13 +1338,11 @@ static void deallocate_threshold_block(unsigned int cpu,
return;
list_for_each_entry_safe(pos, tmp, &head->blocks->miscj, miscj) {
kobject_put(&pos->kobj);
list_del(&pos->miscj);
kfree(pos);
kobject_put(&pos->kobj);
}
kfree(per_cpu(threshold_banks, cpu)[bank]->blocks);
per_cpu(threshold_banks, cpu)[bank]->blocks = NULL;
kobject_put(&head->blocks->kobj);
}
static void __threshold_remove_blocks(struct threshold_bank *b)
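
The fix above moves freeing of each threshold_block into a kobject ->release callback (threshold_block_release(), which recovers the enclosing object with a to_block() container_of helper) so that kobject_put() is the single teardown path. A standalone sketch of that ownership pattern, with a toy refcount standing in for the real kobject machinery; the names here are illustrative, not the kernel API:

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct kobj {
	int refcount;
	void (*release)(struct kobj *);
};

struct block {
	int bank;
	struct kobj kobj;	/* embedded, so release needs container_of */
};

static void block_release(struct kobj *k)
{
	struct block *b = container_of(k, struct block, kobj);
	printf("freeing block for bank %d\n", b->bank);
	free(b);
}

static void kobj_put(struct kobj *k)
{
	if (--k->refcount == 0)
		k->release(k);	/* object frees itself; callers never free() */
}

int main(void)
{
	struct block *b = malloc(sizeof(*b));
	b->bank = 4;
	b->kobj = (struct kobj){ .refcount = 1, .release = block_release };
	kobj_put(&b->kobj);	/* last ref drops -> block_release runs */
	return 0;
}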

View File

@ -94,11 +94,11 @@ __init int create_simplefb(const struct screen_info *si,
if (si->orig_video_isVGA == VIDEO_TYPE_VLFB)
size <<= 16;
length = mode->height * mode->stride;
length = PAGE_ALIGN(length);
if (length > size) {
printk(KERN_WARNING "sysfb: VRAM smaller than advertised\n");
return -EINVAL;
}
length = PAGE_ALIGN(length);
/* setup IORESOURCE_MEM as framebuffer memory */
memset(&res, 0, sizeof(res));
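
The reordering above page-aligns the length before comparing it against the advertised VRAM size, so a framebuffer whose rounded-up length exceeds the region is rejected instead of mapped past its end. A small sketch of the check; PAGE_ALIGN is redefined locally and the numbers are hypothetical:

#include <stdio.h>

#define PAGE_SIZE	4096u
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

int main(void)
{
	unsigned int size = 8192;		/* advertised VRAM */
	unsigned int length = 410 * 20;		/* height * stride = 8200 */

	length = PAGE_ALIGN(length);		/* rounds up to 12288 */
	if (length > size)			/* so the overrun is caught */
		puts("sysfb: VRAM smaller than advertised");
	else
		printf("mapping %u bytes\n", length);
	return 0;
}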

View File

@ -291,13 +291,18 @@ static int __do_cpuid_ent_emulated(struct kvm_cpuid_entry2 *entry,
{
switch (func) {
case 0:
entry->eax = 1; /* only one leaf currently */
entry->eax = 7;
++*nent;
break;
case 1:
entry->ecx = F(MOVBE);
++*nent;
break;
case 7:
entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
if (index == 0)
entry->ecx = F(RDPID);
++*nent;
default:
break;
}

View File

@ -3539,6 +3539,16 @@ static int em_cwd(struct x86_emulate_ctxt *ctxt)
return X86EMUL_CONTINUE;
}
static int em_rdpid(struct x86_emulate_ctxt *ctxt)
{
u64 tsc_aux = 0;
if (ctxt->ops->get_msr(ctxt, MSR_TSC_AUX, &tsc_aux))
return emulate_gp(ctxt, 0);
ctxt->dst.val = tsc_aux;
return X86EMUL_CONTINUE;
}
static int em_rdtsc(struct x86_emulate_ctxt *ctxt)
{
u64 tsc = 0;
@ -4431,10 +4441,20 @@ static const struct opcode group8[] = {
F(DstMem | SrcImmByte | Lock | PageTable, em_btc),
};
/*
* The "memory" destination is actually always a register, since we come
* from the register case of group9.
*/
static const struct gprefix pfx_0f_c7_7 = {
N, N, N, II(DstMem | ModRM | Op3264 | EmulateOnUD, em_rdpid, rdtscp),
};
static const struct group_dual group9 = { {
N, I(DstMem64 | Lock | PageTable, em_cmpxchg8b), N, N, N, N, N, N,
}, {
N, N, N, N, N, N, N, N,
N, N, N, N, N, N, N,
GP(0, &pfx_0f_c7_7),
} };
static const struct opcode group11[] = {

View File

@ -427,7 +427,7 @@ void kvm_scan_ioapic_routes(struct kvm_vcpu *vcpu,
kvm_set_msi_irq(vcpu->kvm, entry, &irq);
if (irq.level && kvm_apic_match_dest(vcpu, NULL, 0,
if (irq.trig_mode && kvm_apic_match_dest(vcpu, NULL, 0,
irq.dest_id, irq.dest_mode))
__set_bit(irq.vector, ioapic_handled_vectors);
}

View File

@ -566,9 +566,11 @@ static inline bool pv_eoi_enabled(struct kvm_vcpu *vcpu)
static bool pv_eoi_get_pending(struct kvm_vcpu *vcpu)
{
u8 val;
if (pv_eoi_get_user(vcpu, &val) < 0)
if (pv_eoi_get_user(vcpu, &val) < 0) {
apic_debug("Can't read EOI MSR value: 0x%llx\n",
(unsigned long long)vcpu->arch.pv_eoi.msr_val);
return false;
}
return val & 0x1;
}
@ -993,11 +995,8 @@ static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
apic_clear_vector(vector, apic->regs + APIC_TMR);
}
if (vcpu->arch.apicv_active)
kvm_x86_ops->deliver_posted_interrupt(vcpu, vector);
else {
if (kvm_x86_ops->deliver_posted_interrupt(vcpu, vector)) {
kvm_lapic_set_irr(vector, apic);
kvm_make_request(KVM_REQ_EVENT, vcpu);
kvm_vcpu_kick(vcpu);
}

View File

@ -4631,8 +4631,11 @@ static void svm_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap)
return;
}
static void svm_deliver_avic_intr(struct kvm_vcpu *vcpu, int vec)
static int svm_deliver_avic_intr(struct kvm_vcpu *vcpu, int vec)
{
if (!vcpu->arch.apicv_active)
return -1;
kvm_lapic_set_irr(vec, vcpu->arch.apic);
smp_mb__after_atomic();
@ -4641,6 +4644,8 @@ static void svm_deliver_avic_intr(struct kvm_vcpu *vcpu, int vec)
kvm_cpu_get_apicid(vcpu->cpu));
else
kvm_vcpu_wake_up(vcpu);
return 0;
}
static bool svm_dy_apicv_has_pending_interrupt(struct kvm_vcpu *vcpu)

View File

@ -4597,6 +4597,9 @@ static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
static int get_ept_level(struct kvm_vcpu *vcpu)
{
/* Nested EPT currently only supports 4-level walks. */
if (is_guest_mode(vcpu) && nested_cpu_has_ept(get_vmcs12(vcpu)))
return 4;
if (cpu_has_vmx_ept_5levels() && (cpuid_maxphyaddr(vcpu) > 48))
return 5;
return 4;
@ -4988,6 +4991,26 @@ static bool cs_ss_rpl_check(struct kvm_vcpu *vcpu)
(ss.selector & SEGMENT_RPL_MASK));
}
static bool nested_vmx_check_io_bitmaps(struct kvm_vcpu *vcpu,
unsigned int port, int size);
static bool nested_vmx_exit_handled_io(struct kvm_vcpu *vcpu,
struct vmcs12 *vmcs12)
{
unsigned long exit_qualification;
unsigned short port;
int size;
if (!nested_cpu_has(vmcs12, CPU_BASED_USE_IO_BITMAPS))
return nested_cpu_has(vmcs12, CPU_BASED_UNCOND_IO_EXITING);
exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
port = exit_qualification >> 16;
size = (exit_qualification & 7) + 1;
return nested_vmx_check_io_bitmaps(vcpu, port, size);
}
/*
* Check if guest state is valid. Returns true if valid, false if
* not.
@ -5518,24 +5541,29 @@ static int vmx_deliver_nested_posted_interrupt(struct kvm_vcpu *vcpu,
* 2. If target vcpu isn't running(root mode), kick it to pick up the
* interrupt from PIR in next vmentry.
*/
static void vmx_deliver_posted_interrupt(struct kvm_vcpu *vcpu, int vector)
static int vmx_deliver_posted_interrupt(struct kvm_vcpu *vcpu, int vector)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
int r;
r = vmx_deliver_nested_posted_interrupt(vcpu, vector);
if (!r)
return;
return 0;
if (!vcpu->arch.apicv_active)
return -1;
if (pi_test_and_set_pir(vector, &vmx->pi_desc))
return;
return 0;
/* If a previous notification has sent the IPI, nothing to do. */
if (pi_test_and_set_on(&vmx->pi_desc))
return;
return 0;
if (!kvm_vcpu_trigger_posted_interrupt(vcpu, false))
kvm_vcpu_kick(vcpu);
return 0;
}
/*
@ -8518,23 +8546,17 @@ static int (*const kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu) = {
static const int kvm_vmx_max_exit_handlers =
ARRAY_SIZE(kvm_vmx_exit_handlers);
static bool nested_vmx_exit_handled_io(struct kvm_vcpu *vcpu,
struct vmcs12 *vmcs12)
/*
* Return true if an IO instruction with the specified port and size should cause
* a VM-exit into L1.
*/
bool nested_vmx_check_io_bitmaps(struct kvm_vcpu *vcpu, unsigned int port,
int size)
{
unsigned long exit_qualification;
struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
gpa_t bitmap, last_bitmap;
unsigned int port;
int size;
u8 b;
if (!nested_cpu_has(vmcs12, CPU_BASED_USE_IO_BITMAPS))
return nested_cpu_has(vmcs12, CPU_BASED_UNCOND_IO_EXITING);
exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
port = exit_qualification >> 16;
size = (exit_qualification & 7) + 1;
last_bitmap = (gpa_t)-1;
b = -1;
@ -12318,11 +12340,71 @@ static void nested_vmx_entry_failure(struct kvm_vcpu *vcpu,
to_vmx(vcpu)->nested.sync_shadow_vmcs = true;
}
static int vmx_check_intercept_io(struct kvm_vcpu *vcpu,
struct x86_instruction_info *info)
{
struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
unsigned short port;
bool intercept;
int size;
if (info->intercept == x86_intercept_in ||
info->intercept == x86_intercept_ins) {
port = info->src_val;
size = info->dst_bytes;
} else {
port = info->dst_val;
size = info->src_bytes;
}
/*
* If the 'use IO bitmaps' VM-execution control is 0, IO instruction
* VM-exits depend on the 'unconditional IO exiting' VM-execution
* control.
*
* Otherwise, IO instruction VM-exits are controlled by the IO bitmaps.
*/
if (!nested_cpu_has(vmcs12, CPU_BASED_USE_IO_BITMAPS))
intercept = nested_cpu_has(vmcs12,
CPU_BASED_UNCOND_IO_EXITING);
else
intercept = nested_vmx_check_io_bitmaps(vcpu, port, size);
return intercept ? X86EMUL_UNHANDLEABLE : X86EMUL_CONTINUE;
}
static int vmx_check_intercept(struct kvm_vcpu *vcpu,
struct x86_instruction_info *info,
enum x86_intercept_stage stage)
{
return X86EMUL_CONTINUE;
struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
switch (info->intercept) {
/*
* RDPID causes #UD if disabled through secondary execution controls.
* Because it is marked as EmulateOnUD, we need to intercept it here.
*/
case x86_intercept_rdtscp:
if (!nested_cpu_has2(vmcs12, SECONDARY_EXEC_RDTSCP)) {
ctxt->exception.vector = UD_VECTOR;
ctxt->exception.error_code_valid = false;
return X86EMUL_PROPAGATE_FAULT;
}
break;
case x86_intercept_in:
case x86_intercept_ins:
case x86_intercept_out:
case x86_intercept_outs:
return vmx_check_intercept_io(vcpu, info);
/* TODO: check more intercepts... */
default:
break;
}
return X86EMUL_UNHANDLEABLE;
}
#ifdef CONFIG_X86_64

File diff suppressed because it is too large

View File

@ -909,7 +909,7 @@ EndTable
GrpTable: Grp3_2
0: TEST Ev,Iz
1:
1: TEST Ev,Iz
2: NOT Ev
3: NEG Ev
4: MUL rAX,Ev

View File

@ -2077,19 +2077,13 @@ int kernel_map_pages_in_pgd(pgd_t *pgd, u64 pfn, unsigned long address,
.pgd = pgd,
.numpages = numpages,
.mask_set = __pgprot(0),
.mask_clr = __pgprot(0),
.mask_clr = __pgprot(~page_flags & (_PAGE_NX|_PAGE_RW)),
.flags = 0,
};
if (!(__supported_pte_mask & _PAGE_NX))
goto out;
if (!(page_flags & _PAGE_NX))
cpa.mask_clr = __pgprot(_PAGE_NX);
if (!(page_flags & _PAGE_RW))
cpa.mask_clr = __pgprot(_PAGE_RW);
if (!(page_flags & _PAGE_ENC))
cpa.mask_clr = pgprot_encrypted(cpa.mask_clr);

View File

@ -480,7 +480,6 @@ void __init efi_init(void)
efi_char16_t *c16;
char vendor[100] = "unknown";
int i = 0;
void *tmp;
#ifdef CONFIG_X86_32
if (boot_params.efi_info.efi_systab_hi ||
@ -505,14 +504,16 @@ void __init efi_init(void)
/*
* Show what we know for posterity
*/
c16 = tmp = early_memremap(efi.systab->fw_vendor, 2);
c16 = early_memremap_ro(efi.systab->fw_vendor,
sizeof(vendor) * sizeof(efi_char16_t));
if (c16) {
for (i = 0; i < sizeof(vendor) - 1 && *c16; ++i)
vendor[i] = *c16++;
for (i = 0; i < sizeof(vendor) - 1 && c16[i]; ++i)
vendor[i] = c16[i];
vendor[i] = '\0';
} else
early_memunmap(c16, sizeof(vendor) * sizeof(efi_char16_t));
} else {
pr_err("Could not map the firmware vendor!\n");
early_memunmap(tmp, 2);
}
pr_info("EFI v%u.%.02u by %s\n",
efi.systab->hdr.revision >> 16,
@ -929,16 +930,14 @@ static void __init __efi_enter_virtual_mode(void)
if (efi_alloc_page_tables()) {
pr_err("Failed to allocate EFI page tables\n");
clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
return;
goto err;
}
efi_merge_regions();
new_memmap = efi_map_regions(&count, &pg_shift);
if (!new_memmap) {
pr_err("Error reallocating memory, EFI runtime non-functional!\n");
clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
return;
goto err;
}
pa = __pa(new_memmap);
@ -952,8 +951,7 @@ static void __init __efi_enter_virtual_mode(void)
if (efi_memmap_init_late(pa, efi.memmap.desc_size * count)) {
pr_err("Failed to remap late EFI memory map\n");
clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
return;
goto err;
}
if (efi_enabled(EFI_DBG)) {
@ -961,12 +959,11 @@ static void __init __efi_enter_virtual_mode(void)
efi_print_memmap();
}
BUG_ON(!efi.systab);
if (WARN_ON(!efi.systab))
goto err;
if (efi_setup_page_tables(pa, 1 << pg_shift)) {
clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
return;
}
if (efi_setup_page_tables(pa, 1 << pg_shift))
goto err;
efi_sync_low_kernel_mappings();
@ -986,9 +983,9 @@ static void __init __efi_enter_virtual_mode(void)
}
if (status != EFI_SUCCESS) {
pr_alert("Unable to switch EFI into virtual mode (status=%lx)!\n",
pr_err("Unable to switch EFI into virtual mode (status=%lx)!\n",
status);
panic("EFI call to SetVirtualAddressMap() failed!");
goto err;
}
/*
@ -1015,6 +1012,10 @@ static void __init __efi_enter_virtual_mode(void)
/* clean DUMMY object */
efi_delete_dummy_variable();
return;
err:
clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
}
void __init efi_enter_virtual_mode(void)
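
The rewritten vendor-string loop above maps enough bytes up front and copies by index with an explicit bound, instead of walking a pointer past a 2-byte mapping. A hedged user-space sketch of the same bounded UTF-16-to-ASCII copy; the firmware string is simulated:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Simulated firmware vendor string (UTF-16, NUL-terminated). */
	uint16_t c16[] = { 'A', 'c', 'm', 'e', ' ', 'E', 'F', 'I', 0 };
	char vendor[100];
	size_t i;

	/* Copy at most sizeof(vendor) - 1 code units, stopping at NUL;
	 * the bound guarantees vendor[i] = '\0' stays in range. */
	for (i = 0; i < sizeof(vendor) - 1 && c16[i]; i++)
		vendor[i] = (char)c16[i];	/* truncates to ASCII */
	vendor[i] = '\0';

	printf("EFI vendor: %s\n", vendor);
	return 0;
}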

View File

@ -390,11 +390,12 @@ int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
return 0;
page = alloc_page(GFP_KERNEL|__GFP_DMA32);
if (!page)
panic("Unable to allocate EFI runtime stack < 4GB\n");
if (!page) {
pr_err("Unable to allocate EFI runtime stack < 4GB\n");
return 1;
}
efi_scratch.phys_stack = virt_to_phys(page_address(page));
efi_scratch.phys_stack += PAGE_SIZE; /* stack grows down */
efi_scratch.phys_stack = page_to_phys(page + 1); /* stack grows down */
npages = (_etext - _text) >> PAGE_SHIFT;
text = __pa(_text);

View File

@ -273,7 +273,7 @@ cleanup:
* FUNCTION: acpi_ds_get_field_names
*
* PARAMETERS: info - create_field info structure
* ` walk_state - Current method state
* walk_state - Current method state
* arg - First parser arg for the field name list
*
* RETURN: Status

View File

@ -444,6 +444,27 @@ acpi_status acpi_ds_load1_end_op(struct acpi_walk_state *walk_state)
ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, "Op=%p State=%p\n", op,
walk_state));
/*
* Disassembler: handle create field operators here.
*
* create_buffer_field is a deferred op that is typically processed in load
* pass 2. However, disassembly of control method contents walks the parse
* tree with ACPI_PARSE_LOAD_PASS1, and AML_CREATE operators are processed
* in a later walk. This is a problem when there is a control method that
* has the same name as the AML_CREATE object. In this case, any use of the
* name segment will be detected as a method call rather than a reference
* to a buffer field.
*
* This earlier creation during disassembly solves this issue by inserting
* the named object in the ACPI namespace so that references to this name
* would be a name string rather than a method call.
*/
if ((walk_state->parse_flags & ACPI_PARSE_DISASSEMBLE) &&
(walk_state->op_info->flags & AML_CREATE)) {
status = acpi_ds_create_buffer_field(op, walk_state);
return_ACPI_STATUS(status);
}
/* We are only interested in opcodes that have an associated name */
if (!(walk_state->op_info->flags & (AML_NAMED | AML_FIELD))) {

View File

@ -88,6 +88,7 @@ enum board_ids {
static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
static void ahci_remove_one(struct pci_dev *dev);
static void ahci_shutdown_one(struct pci_dev *dev);
static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class,
unsigned long deadline);
static int ahci_avn_hardreset(struct ata_link *link, unsigned int *class,
@ -586,6 +587,7 @@ static struct pci_driver ahci_pci_driver = {
.id_table = ahci_pci_tbl,
.probe = ahci_init_one,
.remove = ahci_remove_one,
.shutdown = ahci_shutdown_one,
.driver = {
.pm = &ahci_pci_pm_ops,
},
@ -1823,6 +1825,11 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
return 0;
}
static void ahci_shutdown_one(struct pci_dev *pdev)
{
ata_pci_shutdown_one(pdev);
}
static void ahci_remove_one(struct pci_dev *pdev)
{
pm_runtime_get_noresume(&pdev->dev);

View File

@ -6706,6 +6706,26 @@ void ata_pci_remove_one(struct pci_dev *pdev)
ata_host_detach(host);
}
void ata_pci_shutdown_one(struct pci_dev *pdev)
{
struct ata_host *host = pci_get_drvdata(pdev);
int i;
for (i = 0; i < host->n_ports; i++) {
struct ata_port *ap = host->ports[i];
ap->pflags |= ATA_PFLAG_FROZEN;
/* Disable port interrupts */
if (ap->ops->freeze)
ap->ops->freeze(ap);
/* Stop the port DMA engines */
if (ap->ops->port_stop)
ap->ops->port_stop(ap);
}
}
/* move to PCI subsystem */
int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
{
@ -7326,6 +7346,7 @@ EXPORT_SYMBOL_GPL(ata_timing_cycle2mode);
#ifdef CONFIG_PCI
EXPORT_SYMBOL_GPL(pci_test_config_bits);
EXPORT_SYMBOL_GPL(ata_pci_shutdown_one);
EXPORT_SYMBOL_GPL(ata_pci_remove_one);
#ifdef CONFIG_PM
EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);

View File

@ -1496,12 +1496,14 @@ fore200e_open(struct atm_vcc *vcc)
static void
fore200e_close(struct atm_vcc* vcc)
{
struct fore200e* fore200e = FORE200E_DEV(vcc->dev);
struct fore200e_vcc* fore200e_vcc;
struct fore200e* fore200e;
struct fore200e_vc_map* vc_map;
unsigned long flags;
ASSERT(vcc);
fore200e = FORE200E_DEV(vcc->dev);
ASSERT((vcc->vpi >= 0) && (vcc->vpi < 1<<FORE200E_VPI_BITS));
ASSERT((vcc->vci >= 0) && (vcc->vci < 1<<FORE200E_VCI_BITS));
@ -1546,10 +1548,10 @@ fore200e_close(struct atm_vcc* vcc)
static int
fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb)
{
struct fore200e* fore200e = FORE200E_DEV(vcc->dev);
struct fore200e_vcc* fore200e_vcc = FORE200E_VCC(vcc);
struct fore200e* fore200e;
struct fore200e_vcc* fore200e_vcc;
struct fore200e_vc_map* vc_map;
struct host_txq* txq = &fore200e->host_txq;
struct host_txq* txq;
struct host_txq_entry* entry;
struct tpd* tpd;
struct tpd_haddr tpd_haddr;
@ -1562,9 +1564,18 @@ fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb)
unsigned char* data;
unsigned long flags;
ASSERT(vcc);
ASSERT(fore200e);
ASSERT(fore200e_vcc);
if (!vcc)
return -EINVAL;
fore200e = FORE200E_DEV(vcc->dev);
fore200e_vcc = FORE200E_VCC(vcc);
if (!fore200e)
return -EINVAL;
txq = &fore200e->host_txq;
if (!fore200e_vcc)
return -EINVAL;
if (!test_bit(ATM_VF_READY, &vcc->flags)) {
DPRINTK(1, "VC %d.%d.%d not ready for tx\n", vcc->itf, vcc->vpi, vcc->vpi);

View File

@ -375,7 +375,10 @@ static int really_probe(struct device *dev, struct device_driver *drv)
atomic_inc(&probe_count);
pr_debug("bus: '%s': %s: probing driver %s with device %s\n",
drv->bus->name, __func__, drv->name, dev_name(dev));
WARN_ON(!list_empty(&dev->devres_head));
if (!list_empty(&dev->devres_head)) {
dev_crit(dev, "Resources present before probing\n");
return -EBUSY;
}
re_probe:
dev->driver = drv;

View File

@ -28,6 +28,7 @@
#include <linux/limits.h>
#include <linux/property.h>
#include <linux/kmemleak.h>
#include <linux/types.h>
#include "base.h"
#include "power/power.h"
@ -68,7 +69,7 @@ void __weak arch_setup_pdev_archdata(struct platform_device *pdev)
struct resource *platform_get_resource(struct platform_device *dev,
unsigned int type, unsigned int num)
{
int i;
u32 i;
for (i = 0; i < dev->num_resources; i++) {
struct resource *r = &dev->resource[i];
@ -163,7 +164,7 @@ struct resource *platform_get_resource_byname(struct platform_device *dev,
unsigned int type,
const char *name)
{
int i;
u32 i;
for (i = 0; i < dev->num_resources; i++) {
struct resource *r = &dev->resource[i];
@ -360,7 +361,8 @@ EXPORT_SYMBOL_GPL(platform_device_add_properties);
*/
int platform_device_add(struct platform_device *pdev)
{
int i, ret;
u32 i;
int ret;
if (!pdev)
return -EINVAL;
@ -426,7 +428,7 @@ int platform_device_add(struct platform_device *pdev)
pdev->id = PLATFORM_DEVID_AUTO;
}
while (--i >= 0) {
while (i--) {
struct resource *r = &pdev->resource[i];
if (r->parent)
release_resource(r);
@ -447,7 +449,7 @@ EXPORT_SYMBOL_GPL(platform_device_add);
*/
void platform_device_del(struct platform_device *pdev)
{
int i;
u32 i;
if (pdev) {
device_remove_properties(&pdev->dev);
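
Because i becomes u32 in the changes above, the old unwind loop `while (--i >= 0)` would never terminate: an unsigned value is always >= 0, so after i reaches 0 the decrement wraps to UINT_MAX. The patch switches to `while (i--)`, which tests before decrementing. A tiny standalone sketch of the correct idiom:

#include <stdio.h>

int main(void)
{
	unsigned int i = 3;

	/* Correct unsigned countdown: the condition evaluates i, then
	 * decrements, so the body sees 2, 1, 0 and the loop stops once
	 * the tested value was 0. */
	while (i--)
		printf("releasing resource %u\n", i);

	/* By contrast, `while (--i >= 0)` with unsigned i would loop
	 * forever, since --i wraps from 0 to UINT_MAX. */
	return 0;
}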

View File

@ -529,6 +529,25 @@ static struct kobject *brd_probe(dev_t dev, int *part, void *data)
return kobj;
}
static inline void brd_check_and_reset_par(void)
{
if (unlikely(!max_part))
max_part = 1;
/*
* make sure 'max_part' divides (1U << MINORBITS) exactly;
* otherwise, it is possible to get the same dev_t when adding partitions.
*/
if ((1U << MINORBITS) % max_part != 0)
max_part = 1UL << fls(max_part);
if (max_part > DISK_MAX_PARTS) {
pr_info("brd: max_part can't be larger than %d, reset max_part = %d.\n",
DISK_MAX_PARTS, DISK_MAX_PARTS);
max_part = DISK_MAX_PARTS;
}
}
static int __init brd_init(void)
{
struct brd_device *brd, *next;
@ -552,8 +571,7 @@ static int __init brd_init(void)
if (register_blkdev(RAMDISK_MAJOR, "ramdisk"))
return -EIO;
if (unlikely(!max_part))
max_part = 1;
brd_check_and_reset_par();
for (i = 0; i < rd_nr; i++) {
brd = brd_alloc(i);
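
brd_check_and_reset_par() above rounds a max_part that does not divide (1U << MINORBITS) up to the next power of two via fls(). A user-space sketch of that rounding; fls() is a kernel helper, so it is emulated here with a loop, and the parameter value is hypothetical:

#include <stdio.h>

/* fls(x): position of the highest set bit, 1-based; fls(0) == 0.
 * Emulated here; the kernel provides this in <linux/bitops.h>. */
static int fls_emul(unsigned int x)
{
	int r = 0;
	while (x) {
		x >>= 1;
		r++;
	}
	return r;
}

int main(void)
{
	unsigned int minorbits = 20;		/* MINORBITS on Linux */
	unsigned int max_part = 12;		/* hypothetical module param */

	if ((1u << minorbits) % max_part != 0)
		max_part = 1u << fls_emul(max_part);	/* 12 -> 16 */

	printf("max_part = %u\n", max_part);	/* 16 divides 1 << 20 */
	return 0;
}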

View File

@ -848,14 +848,17 @@ static void reset_fdc_info(int mode)
/* selects the fdc and drive, and enables the fdc's input/dma. */
static void set_fdc(int drive)
{
unsigned int new_fdc = fdc;
if (drive >= 0 && drive < N_DRIVE) {
fdc = FDC(drive);
new_fdc = FDC(drive);
current_drive = drive;
}
if (fdc != 1 && fdc != 0) {
if (new_fdc >= N_FDC) {
pr_info("bad fdc value\n");
return;
}
fdc = new_fdc;
set_dor(fdc, ~0, 8);
#if N_FDC > 1
set_dor(1 - fdc, ~8, 0);
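
The fix above computes the new controller index into a local and validates it against N_FDC before committing it to the global fdc, so a bad drive number can no longer leave fdc out of range. A generic sketch of this validate-then-commit pattern; the bounds and the drive-to-controller mapping are illustrative stand-ins, not the floppy driver's FDC() macro:

#include <stdio.h>

#define N_FDC 2
static unsigned int fdc;	/* global controller index */

static void set_fdc(int drive, int n_drive)
{
	unsigned int new_fdc = fdc;	/* start from the current value */

	if (drive >= 0 && drive < n_drive)
		new_fdc = drive / 2;	/* stand-in for the FDC() macro */

	if (new_fdc >= N_FDC) {		/* validate before touching state */
		puts("bad fdc value");
		return;
	}
	fdc = new_fdc;			/* commit only a checked value */
}

int main(void)
{
	set_fdc(7, 8);	/* would select controller 3: rejected */
	set_fdc(1, 8);	/* controller 0: accepted */
	printf("fdc = %u\n", fdc);
	return 0;
}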

View File

@ -1203,6 +1203,16 @@ static int nbd_start_device(struct nbd_device *nbd)
args = kzalloc(sizeof(*args), GFP_KERNEL);
if (!args) {
sock_shutdown(nbd);
/*
* If num_connections is m (m > 2) and kzalloc succeeds for the
* first n connections (1 < n < m) but fails for connection n + 1,
* we still have n recv threads running. So, add flush_workqueue here
* to prevent those threads from dropping the last config_refs and
* trying to destroy the workqueue from inside the workqueue.
*/
if (i)
flush_workqueue(nbd->recv_workq);
return -ENOMEM;
}
sk_set_memalloc(config->socks[i]->sock->sk);

View File

@ -1598,8 +1598,9 @@ static void _warn_unseeded_randomness(const char *func_name, void *caller,
print_once = true;
#endif
if (__ratelimit(&unseeded_warning))
pr_notice("random: %s called from %pS with crng_init=%d\n",
func_name, caller, crng_init);
printk_deferred(KERN_NOTICE "random: %s called from %pS "
"with crng_init=%d\n", func_name, caller,
crng_init);
}
/*

View File

@ -210,6 +210,9 @@ static int _freq_tbl_determine_rate(struct clk_hw *hw, const struct freq_tbl *f,
clk_flags = clk_hw_get_flags(hw);
p = clk_hw_get_parent_by_index(hw, index);
if (!p)
return -EINVAL;
if (clk_flags & CLK_SET_RATE_PARENT) {
if (f->pre_div) {
if (!rate)

View File

@ -884,11 +884,26 @@ static const struct sunxi_ccu_desc sun50i_a64_ccu_desc = {
.num_resets = ARRAY_SIZE(sun50i_a64_ccu_resets),
};
static struct ccu_pll_nb sun50i_a64_pll_cpu_nb = {
.common = &pll_cpux_clk.common,
/* copy from pll_cpux_clk */
.enable = BIT(31),
.lock = BIT(28),
};
static struct ccu_mux_nb sun50i_a64_cpu_nb = {
.common = &cpux_clk.common,
.cm = &cpux_clk.mux,
.delay_us = 1, /* > 8 clock cycles at 24 MHz */
.bypass_index = 1, /* index of 24 MHz oscillator */
};
static int sun50i_a64_ccu_probe(struct platform_device *pdev)
{
struct resource *res;
void __iomem *reg;
u32 val;
int ret;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
reg = devm_ioremap_resource(&pdev->dev, res);
@ -902,7 +917,18 @@ static int sun50i_a64_ccu_probe(struct platform_device *pdev)
writel(0x515, reg + SUN50I_A64_PLL_MIPI_REG);
return sunxi_ccu_probe(pdev->dev.of_node, reg, &sun50i_a64_ccu_desc);
ret = sunxi_ccu_probe(pdev->dev.of_node, reg, &sun50i_a64_ccu_desc);
if (ret)
return ret;
/* Gate then ungate PLL CPU after any rate changes */
ccu_pll_notifier_register(&sun50i_a64_pll_cpu_nb);
/* Reparent CPU during PLL CPU rate changes */
ccu_mux_notifier_register(pll_cpux_clk.common.hw.clk,
&sun50i_a64_cpu_nb);
return 0;
}
static const struct of_device_id sun50i_a64_ccu_ids[] = {

View File

@ -134,7 +134,7 @@ static int __init bcm2835_timer_init(struct device_node *node)
ret = setup_irq(irq, &timer->act);
if (ret) {
pr_err("Can't set up timer IRQ\n");
goto err_iounmap;
goto err_timer_free;
}
clockevents_config_and_register(&timer->evt, freq, 0xf, 0xffffffff);
@ -143,6 +143,9 @@ static int __init bcm2835_timer_init(struct device_node *node)
return 0;
err_timer_free:
kfree(timer);
err_iounmap:
iounmap(base);
return ret;

View File

@ -103,7 +103,8 @@ config ARM_TEGRA_DEVFREQ
config ARM_RK3399_DMC_DEVFREQ
tristate "ARM RK3399 DMC DEVFREQ Driver"
depends on ARCH_ROCKCHIP
depends on (ARCH_ROCKCHIP && HAVE_ARM_SMCCC) || \
(COMPILE_TEST && HAVE_ARM_SMCCC)
select DEVFREQ_EVENT_ROCKCHIP_DFI
select DEVFREQ_GOV_SIMPLE_ONDEMAND
select PM_DEVFREQ_EVENT

View File

@ -33,7 +33,7 @@ config DEVFREQ_EVENT_EXYNOS_PPMU
config DEVFREQ_EVENT_ROCKCHIP_DFI
tristate "ROCKCHIP DFI DEVFREQ event Driver"
depends on ARCH_ROCKCHIP
depends on ARCH_ROCKCHIP || COMPILE_TEST
help
This adds the devfreq-event driver for Rockchip SoC. It provides DFI
(DDR Monitor Module) driver to count ddr load.

View File

@ -192,7 +192,7 @@ __dma_device_satisfies_mask(struct dma_device *device,
static struct module *dma_chan_to_owner(struct dma_chan *chan)
{
return chan->device->dev->driver->owner;
return chan->device->owner;
}
/**
@ -928,6 +928,8 @@ int dma_async_device_register(struct dma_device *device)
return -EIO;
}
device->owner = device->dev->driver->owner;
if (dma_has_cap(DMA_MEMCPY, device->cap_mask) && !device->device_prep_dma_memcpy) {
dev_err(device->dev,
"Device claims capability %s, but op is not defined\n",

View File

@ -259,17 +259,16 @@ static int grgpio_irq_map(struct irq_domain *d, unsigned int irq,
lirq->irq = irq;
uirq = &priv->uirqs[lirq->index];
if (uirq->refcnt == 0) {
spin_unlock_irqrestore(&priv->gc.bgpio_lock, flags);
ret = request_irq(uirq->uirq, grgpio_irq_handler, 0,
dev_name(priv->dev), priv);
if (ret) {
dev_err(priv->dev,
"Could not request underlying irq %d\n",
uirq->uirq);
spin_unlock_irqrestore(&priv->gc.bgpio_lock, flags);
return ret;
}
spin_lock_irqsave(&priv->gc.bgpio_lock, flags);
}
uirq->refcnt++;
@ -315,8 +314,11 @@ static void grgpio_irq_unmap(struct irq_domain *d, unsigned int irq)
if (index >= 0) {
uirq = &priv->uirqs[lirq->index];
uirq->refcnt--;
if (uirq->refcnt == 0)
if (uirq->refcnt == 0) {
spin_unlock_irqrestore(&priv->gc.bgpio_lock, flags);
free_irq(uirq->uirq, priv);
return;
}
}
spin_unlock_irqrestore(&priv->gc.bgpio_lock, flags);
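
The reworked paths above drop the spinlock before calling request_irq()/free_irq(), which may sleep, and re-take it afterwards. A user-space sketch of that drop-lock-around-a-sleeping-call shape using pthreads; the sleeping call is simulated, and like the driver, the sketch tolerates the window where the lock is not held:

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int refcnt;

static void slow_setup(void)
{
	usleep(1000);	/* stands in for request_irq(), which may sleep */
}

static void map_irq(void)
{
	pthread_mutex_lock(&lock);
	if (refcnt == 0) {
		/* Must not sleep while holding the lock: drop it, do
		 * the slow work, then re-acquire before touching the
		 * shared refcount again. */
		pthread_mutex_unlock(&lock);
		slow_setup();
		pthread_mutex_lock(&lock);
	}
	refcnt++;
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	map_irq();
	printf("refcnt = %d\n", refcnt);
	return 0;
}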

View File

@ -336,17 +336,9 @@ bool amdgpu_atombios_get_connector_info_from_object_table(struct amdgpu_device *
path_size += le16_to_cpu(path->usSize);
if (device_support & le16_to_cpu(path->usDeviceTag)) {
uint8_t con_obj_id, con_obj_num, con_obj_type;
con_obj_id =
uint8_t con_obj_id =
(le16_to_cpu(path->usConnObjectId) & OBJECT_ID_MASK)
>> OBJECT_ID_SHIFT;
con_obj_num =
(le16_to_cpu(path->usConnObjectId) & ENUM_ID_MASK)
>> ENUM_ID_SHIFT;
con_obj_type =
(le16_to_cpu(path->usConnObjectId) &
OBJECT_TYPE_MASK) >> OBJECT_TYPE_SHIFT;
/* Skip TV/CV support */
if ((le16_to_cpu(path->usDeviceTag) ==
@ -371,14 +363,7 @@ bool amdgpu_atombios_get_connector_info_from_object_table(struct amdgpu_device *
router.ddc_valid = false;
router.cd_valid = false;
for (j = 0; j < ((le16_to_cpu(path->usSize) - 8) / 2); j++) {
uint8_t grph_obj_id, grph_obj_num, grph_obj_type;
grph_obj_id =
(le16_to_cpu(path->usGraphicObjIds[j]) &
OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
grph_obj_num =
(le16_to_cpu(path->usGraphicObjIds[j]) &
ENUM_ID_MASK) >> ENUM_ID_SHIFT;
uint8_t grph_obj_type=
grph_obj_type =
(le16_to_cpu(path->usGraphicObjIds[j]) &
OBJECT_TYPE_MASK) >> OBJECT_TYPE_SHIFT;

View File

@ -279,7 +279,12 @@ static void soc15_init_golden_registers(struct amdgpu_device *adev)
}
static u32 soc15_get_xclk(struct amdgpu_device *adev)
{
return adev->clock.spll.reference_freq;
u32 reference_clock = adev->clock.spll.reference_freq;
if (adev->asic_type == CHIP_RAVEN)
return reference_clock / 4;
return reference_clock;
}

View File

@ -101,8 +101,8 @@ static ssize_t crc_control_write(struct file *file, const char __user *ubuf,
if (IS_ERR(source))
return PTR_ERR(source);
if (source[len] == '\n')
source[len] = '\0';
if (source[len - 1] == '\n')
source[len - 1] = '\0';
spin_lock_irq(&crc->lock);
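
The off-by-one fix above checks source[len - 1] rather than source[len]: for a NUL-terminated copy of a len-byte write, the trailing newline sits at index len - 1, while source[len] is already the terminator. A quick standalone sketch with hypothetical input:

#include <stdio.h>
#include <string.h>

int main(void)
{
	char source[] = "auto\n";	/* as copied from a len-byte write */
	size_t len = strlen(source);	/* 5: source[5] is already '\0' */

	/* The newline, if any, is the last written byte. Guarding on
	 * len also keeps an empty write from indexing source[-1]. */
	if (len && source[len - 1] == '\n')
		source[len - 1] = '\0';

	printf("crc source: \"%s\"\n", source);
	return 0;
}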

View File

@ -486,6 +486,7 @@ static int psbfb_probe(struct drm_fb_helper *helper,
container_of(helper, struct psb_fbdev, psb_fb_helper);
struct drm_device *dev = psb_fbdev->psb_fb_helper.dev;
struct drm_psb_private *dev_priv = dev->dev_private;
unsigned int fb_size;
int bytespp;
bytespp = sizes->surface_bpp / 8;
@ -495,8 +496,11 @@ static int psbfb_probe(struct drm_fb_helper *helper,
/* If the mode will not fit in 32bit then switch to 16bit to get
a console on full resolution. The X mode setting server will
allocate its own 32bit GEM framebuffer */
if (ALIGN(sizes->fb_width * bytespp, 64) * sizes->fb_height >
dev_priv->vram_stolen_size) {
fb_size = ALIGN(sizes->surface_width * bytespp, 64) *
sizes->surface_height;
fb_size = ALIGN(fb_size, PAGE_SIZE);
if (fb_size > dev_priv->vram_stolen_size) {
sizes->surface_bpp = 16;
sizes->surface_depth = 16;
}

View File

@ -307,6 +307,7 @@ err_pm_runtime_put:
static void mtk_crtc_ddp_hw_fini(struct mtk_drm_crtc *mtk_crtc)
{
struct drm_device *drm = mtk_crtc->base.dev;
struct drm_crtc *crtc = &mtk_crtc->base;
int i;
DRM_DEBUG_DRIVER("%s\n", __func__);
@ -328,6 +329,13 @@ static void mtk_crtc_ddp_hw_fini(struct mtk_drm_crtc *mtk_crtc)
mtk_disp_mutex_unprepare(mtk_crtc->mutex);
pm_runtime_put(drm->dev);
if (crtc->state->event && !crtc->state->active) {
spin_lock_irq(&crtc->dev->event_lock);
drm_crtc_send_vblank_event(crtc, crtc->state->event);
crtc->state->event = NULL;
spin_unlock_irq(&crtc->dev->event_lock);
}
}
static void mtk_crtc_ddp_config(struct drm_crtc *crtc)

View File

@ -158,7 +158,7 @@ nouveau_fence_wait_uevent_handler(struct nvif_notify *notify)
fence = list_entry(fctx->pending.next, typeof(*fence), head);
chan = rcu_dereference_protected(fence->channel, lockdep_is_held(&fctx->lock));
if (nouveau_fence_update(fence->channel, fctx))
if (nouveau_fence_update(chan, fctx))
ret = NVIF_NOTIFY_DROP;
}
spin_unlock_irqrestore(&fctx->lock, flags);

View File

@ -73,6 +73,8 @@ nv50_disp_chan_mthd(struct nv50_disp_chan *chan, int debug)
if (debug > subdev->debug)
return;
if (!mthd)
return;
for (i = 0; (list = mthd->data[i].mthd) != NULL; i++) {
u32 base = chan->head * mthd->addr;

View File

@ -143,23 +143,24 @@ gk20a_gr_av_to_method(struct gf100_gr *gr, const char *fw_name,
nent = (fuc.size / sizeof(struct gk20a_fw_av));
pack = vzalloc((sizeof(*pack) * max_classes) +
(sizeof(*init) * (nent + 1)));
pack = vzalloc((sizeof(*pack) * (max_classes + 1)) +
(sizeof(*init) * (nent + max_classes + 1)));
if (!pack) {
ret = -ENOMEM;
goto end;
}
init = (void *)(pack + max_classes);
init = (void *)(pack + max_classes + 1);
for (i = 0; i < nent; i++) {
struct gf100_gr_init *ent = &init[i];
for (i = 0; i < nent; i++, init++) {
struct gk20a_fw_av *av = &((struct gk20a_fw_av *)fuc.data)[i];
u32 class = av->addr & 0xffff;
u32 addr = (av->addr & 0xffff0000) >> 14;
if (prevclass != class) {
pack[classidx].init = ent;
if (prevclass) /* Add terminator to the method list. */
init++;
pack[classidx].init = init;
pack[classidx].type = class;
prevclass = class;
if (++classidx >= max_classes) {
@ -169,10 +170,10 @@ gk20a_gr_av_to_method(struct gf100_gr *gr, const char *fw_name,
}
}
ent->addr = addr;
ent->data = av->data;
ent->count = 1;
ent->pitch = 1;
init->addr = addr;
init->data = av->data;
init->count = 1;
init->pitch = 1;
}
*ppack = pack;

View File

@ -108,6 +108,7 @@ gm20b_secboot_new(struct nvkm_device *device, int index,
struct gm200_secboot *gsb;
struct nvkm_acr *acr;
*psb = NULL;
acr = acr_r352_new(BIT(NVKM_SECBOOT_FALCON_FECS) |
BIT(NVKM_SECBOOT_FALCON_PMU));
if (IS_ERR(acr))
@ -116,10 +117,8 @@ gm20b_secboot_new(struct nvkm_device *device, int index,
acr->optional_falcons = BIT(NVKM_SECBOOT_FALCON_PMU);
gsb = kzalloc(sizeof(*gsb), GFP_KERNEL);
if (!gsb) {
psb = NULL;
if (!gsb)
return -ENOMEM;
}
*psb = &gsb->base;
ret = nvkm_secboot_ctor(&gm20b_secboot, acr, device, index, &gsb->base);

View File

@ -119,6 +119,8 @@ static void dce5_crtc_load_lut(struct drm_crtc *crtc)
DRM_DEBUG_KMS("%d\n", radeon_crtc->crtc_id);
msleep(10);
WREG32(NI_INPUT_CSC_CONTROL + radeon_crtc->crtc_offset,
(NI_INPUT_CSC_GRPH_MODE(NI_INPUT_CSC_BYPASS) |
NI_INPUT_CSC_OVL_MODE(NI_INPUT_CSC_BYPASS)));

View File

@ -210,8 +210,10 @@ int vmw_cmdbuf_res_add(struct vmw_cmdbuf_res_manager *man,
cres->hash.key = user_key | (res_type << 24);
ret = drm_ht_insert_item(&man->resources, &cres->hash);
if (unlikely(ret != 0))
if (unlikely(ret != 0)) {
kfree(cres);
goto out_invalid_key;
}
cres->state = VMW_CMDBUF_RES_ADD;
cres->res = vmw_resource_reference(res);

View File

@ -89,8 +89,8 @@ enum chips { ltc2974, ltc2975, ltc2977, ltc2978, ltc2980, ltc3880, ltc3882,
#define LTC_POLL_TIMEOUT 100 /* in milli-seconds */
#define LTC_NOT_BUSY BIT(5)
#define LTC_NOT_PENDING BIT(4)
#define LTC_NOT_BUSY BIT(6)
#define LTC_NOT_PENDING BIT(5)
/*
* LTC2978 clears peak data whenever the CLEAR_FAULTS command is executed, which

View File

@ -65,6 +65,9 @@ static void cmd64x_program_timings(ide_drive_t *drive, u8 mode)
struct ide_timing t;
u8 arttim = 0;
if (drive->dn >= ARRAY_SIZE(drwtim_regs))
return;
ide_timing_compute(drive, mode, &t, T, 0);
/*

View File

@ -114,6 +114,9 @@ static void svwks_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
struct pci_dev *dev = to_pci_dev(hwif->dev);
const u8 pio = drive->pio_mode - XFER_PIO_0;
if (drive->dn >= ARRAY_SIZE(drive_pci))
return;
pci_write_config_byte(dev, drive_pci[drive->dn], pio_modes[pio]);
if (svwks_csb_check(dev)) {
@ -140,6 +143,9 @@ static void svwks_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
u8 ultra_enable = 0, ultra_timing = 0, dma_timing = 0;
if (drive->dn >= ARRAY_SIZE(drive_pci2))
return;
pci_read_config_byte(dev, (0x56|hwif->channel), &ultra_timing);
pci_read_config_byte(dev, 0x54, &ultra_enable);
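Editor's note: both IDE hunks add the same guard, bailing out when the drive index would run past the register lookup table. A runnable sketch of the idiom, with ARRAY_SIZE defined the way the kernel defines it (the table contents here are illustrative, not the real register map):

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static const unsigned char drive_pci[] = { 0x41, 0x40, 0x43, 0x42 };

static void set_timing(unsigned dn)
{
    if (dn >= ARRAY_SIZE(drive_pci)) /* out-of-range unit: ignore, don't index */
        return;
    printf("drive %u -> config reg 0x%02x\n", dn, drive_pci[dn]);
}

int main(void)
{
    set_timing(1);
    set_timing(7);  /* silently rejected instead of reading past the table */
    return 0;
}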

View File

@ -338,22 +338,16 @@ static struct ib_ports_pkeys *get_new_pps(const struct ib_qp *qp,
if (!new_pps)
return NULL;
if (qp_attr_mask & (IB_QP_PKEY_INDEX | IB_QP_PORT)) {
if (!qp_pps) {
new_pps->main.port_num = qp_attr->port_num;
new_pps->main.pkey_index = qp_attr->pkey_index;
} else {
new_pps->main.port_num = (qp_attr_mask & IB_QP_PORT) ?
qp_attr->port_num :
qp_pps->main.port_num;
new_pps->main.pkey_index =
(qp_attr_mask & IB_QP_PKEY_INDEX) ?
qp_attr->pkey_index :
qp_pps->main.pkey_index;
}
if (qp_attr_mask & IB_QP_PORT)
new_pps->main.port_num =
(qp_pps) ? qp_pps->main.port_num : qp_attr->port_num;
if (qp_attr_mask & IB_QP_PKEY_INDEX)
new_pps->main.pkey_index = (qp_pps) ? qp_pps->main.pkey_index :
qp_attr->pkey_index;
if ((qp_attr_mask & IB_QP_PKEY_INDEX) && (qp_attr_mask & IB_QP_PORT))
new_pps->main.state = IB_PORT_PKEY_VALID;
} else if (qp_pps) {
if (!(qp_attr_mask & (IB_QP_PKEY_INDEX || IB_QP_PORT)) && qp_pps) {
new_pps->main.port_num = qp_pps->main.port_num;
new_pps->main.pkey_index = qp_pps->main.pkey_index;
if (qp_pps->main.state != IB_PORT_PKEY_NOT_VALID)
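Editor's note: one caveat in the rewritten guard above: qp_attr_mask & (IB_QP_PKEY_INDEX || IB_QP_PORT) uses a logical OR, so the parenthesized expression evaluates to 1 and only bit 0 of the mask is actually tested; a bitwise OR appears to be intended, and upstream later changed it to one. A runnable demonstration of the difference, with illustrative flag values (not the real ib_verbs.h constants):

#include <stdio.h>

#define IB_QP_PKEY_INDEX (1 << 6)   /* illustrative bit positions */
#define IB_QP_PORT       (1 << 7)

int main(void)
{
    int mask = IB_QP_PORT;

    /* Logical OR: (A || B) == 1, so this tests bit 0 only and misses the flag. */
    printf("logical: %d\n", !!(mask & (IB_QP_PKEY_INDEX || IB_QP_PORT)));

    /* Bitwise OR tests the two intended bits. */
    printf("bitwise: %d\n", !!(mask & (IB_QP_PKEY_INDEX | IB_QP_PORT)));
    return 0;
}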

View File

@ -1686,6 +1686,14 @@ static u64 access_sw_pio_drain(const struct cntr_entry *entry,
return dd->verbs_dev.n_piodrain;
}
static u64 access_sw_ctx0_seq_drop(const struct cntr_entry *entry,
void *context, int vl, int mode, u64 data)
{
struct hfi1_devdata *dd = context;
return dd->ctx0_seq_drop;
}
static u64 access_sw_vtx_wait(const struct cntr_entry *entry,
void *context, int vl, int mode, u64 data)
{
@ -4246,6 +4254,8 @@ static struct cntr_entry dev_cntrs[DEV_CNTR_LAST] = {
access_sw_cpu_intr),
[C_SW_CPU_RCV_LIM] = CNTR_ELEM("RcvLimit", 0, 0, CNTR_NORMAL,
access_sw_cpu_rcv_limit),
[C_SW_CTX0_SEQ_DROP] = CNTR_ELEM("SeqDrop0", 0, 0, CNTR_NORMAL,
access_sw_ctx0_seq_drop),
[C_SW_VTX_WAIT] = CNTR_ELEM("vTxWait", 0, 0, CNTR_NORMAL,
access_sw_vtx_wait),
[C_SW_PIO_WAIT] = CNTR_ELEM("PioWait", 0, 0, CNTR_NORMAL,

View File

@ -864,6 +864,7 @@ enum {
C_DC_PG_STS_TX_MBE_CNT,
C_SW_CPU_INTR,
C_SW_CPU_RCV_LIM,
C_SW_CTX0_SEQ_DROP,
C_SW_VTX_WAIT,
C_SW_PIO_WAIT,
C_SW_PIO_DRAIN,

View File

@ -710,6 +710,7 @@ static noinline int skip_rcv_packet(struct hfi1_packet *packet, int thread)
{
int ret;
packet->rcd->dd->ctx0_seq_drop++;
/* Set up for the next packet */
packet->rhqoff += packet->rsize;
if (packet->rhqoff >= packet->maxcnt)

View File

@ -195,23 +195,24 @@ static int hfi1_file_open(struct inode *inode, struct file *fp)
fd = kzalloc(sizeof(*fd), GFP_KERNEL);
if (fd) {
if (!fd || init_srcu_struct(&fd->pq_srcu))
goto nomem;
spin_lock_init(&fd->pq_rcu_lock);
spin_lock_init(&fd->tid_lock);
spin_lock_init(&fd->invalid_lock);
fd->rec_cpu_num = -1; /* no cpu affinity by default */
fd->mm = current->mm;
mmgrab(fd->mm);
fd->dd = dd;
kobject_get(&fd->dd->kobj);
fp->private_data = fd;
} else {
return 0;
nomem:
kfree(fd);
fp->private_data = NULL;
if (atomic_dec_and_test(&dd->user_refcount))
complete(&dd->user_comp);
return -ENOMEM;
}
return 0;
}
static long hfi1_file_ioctl(struct file *fp, unsigned int cmd,
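Editor's note: the open path above is flattened from an if/else pyramid into early exits with a single nomem: unwind label, which also absorbs the new init_srcu_struct() failure case; kfree(NULL) is a no-op, so one label covers both failure points. The shape of the idiom in a standalone sketch (all names are stand-ins):

#include <stdlib.h>

struct filedata { int rec_cpu_num; };

static int init_srcu(struct filedata *fd) { (void)fd; return 0; } /* stand-in */

static int file_open(struct filedata **out)
{
    struct filedata *fd = calloc(1, sizeof(*fd));

    if (!fd || init_srcu(fd))   /* short-circuit: init_srcu() only runs if fd != NULL */
        goto nomem;

    fd->rec_cpu_num = -1;
    *out = fd;
    return 0;

nomem:
    free(fd);                   /* free(NULL) is defined, like kfree(NULL) */
    *out = NULL;
    return -1;                  /* stands in for -ENOMEM */
}

int main(void)
{
    struct filedata *fd;
    return file_open(&fd);
}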
@ -417,21 +418,30 @@ static long hfi1_file_ioctl(struct file *fp, unsigned int cmd,
static ssize_t hfi1_write_iter(struct kiocb *kiocb, struct iov_iter *from)
{
struct hfi1_filedata *fd = kiocb->ki_filp->private_data;
struct hfi1_user_sdma_pkt_q *pq = fd->pq;
struct hfi1_user_sdma_pkt_q *pq;
struct hfi1_user_sdma_comp_q *cq = fd->cq;
int done = 0, reqs = 0;
unsigned long dim = from->nr_segs;
int idx;
if (!cq || !pq)
idx = srcu_read_lock(&fd->pq_srcu);
pq = srcu_dereference(fd->pq, &fd->pq_srcu);
if (!cq || !pq) {
srcu_read_unlock(&fd->pq_srcu, idx);
return -EIO;
}
if (!iter_is_iovec(from) || !dim)
if (!iter_is_iovec(from) || !dim) {
srcu_read_unlock(&fd->pq_srcu, idx);
return -EINVAL;
}
trace_hfi1_sdma_request(fd->dd, fd->uctxt->ctxt, fd->subctxt, dim);
if (atomic_read(&pq->n_reqs) == pq->n_max_reqs)
if (atomic_read(&pq->n_reqs) == pq->n_max_reqs) {
srcu_read_unlock(&fd->pq_srcu, idx);
return -ENOSPC;
}
while (dim) {
int ret;
@ -449,6 +459,7 @@ static ssize_t hfi1_write_iter(struct kiocb *kiocb, struct iov_iter *from)
reqs++;
}
srcu_read_unlock(&fd->pq_srcu, idx);
return reqs;
}
@ -824,6 +835,7 @@ done:
if (atomic_dec_and_test(&dd->user_refcount))
complete(&dd->user_comp);
cleanup_srcu_struct(&fdata->pq_srcu);
kfree(fdata);
return 0;
}

View File

@ -1043,6 +1043,8 @@ struct hfi1_devdata {
char *boardname; /* human readable board info */
u64 ctx0_seq_drop;
/* reset value */
u64 z_int_counter;
u64 z_rcv_limit;
@ -1353,10 +1355,13 @@ struct mmu_rb_handler;
/* Private data for file operations */
struct hfi1_filedata {
struct srcu_struct pq_srcu;
struct hfi1_devdata *dd;
struct hfi1_ctxtdata *uctxt;
struct hfi1_user_sdma_comp_q *cq;
struct hfi1_user_sdma_pkt_q *pq;
/* update side lock for SRCU */
spinlock_t pq_rcu_lock;
struct hfi1_user_sdma_pkt_q __rcu *pq;
u16 subctxt;
/* for cpu affinity; -1 if none */
int rec_cpu_num;

View File

@ -90,9 +90,6 @@ int hfi1_user_exp_rcv_init(struct hfi1_filedata *fd,
struct hfi1_devdata *dd = uctxt->dd;
int ret = 0;
spin_lock_init(&fd->tid_lock);
spin_lock_init(&fd->invalid_lock);
fd->entry_to_rb = kcalloc(uctxt->expected_count,
sizeof(struct rb_node *),
GFP_KERNEL);

View File

@ -179,7 +179,6 @@ int hfi1_user_sdma_alloc_queues(struct hfi1_ctxtdata *uctxt,
pq = kzalloc(sizeof(*pq), GFP_KERNEL);
if (!pq)
return -ENOMEM;
pq->dd = dd;
pq->ctxt = uctxt->ctxt;
pq->subctxt = fd->subctxt;
@ -236,7 +235,7 @@ int hfi1_user_sdma_alloc_queues(struct hfi1_ctxtdata *uctxt,
goto pq_mmu_fail;
}
fd->pq = pq;
rcu_assign_pointer(fd->pq, pq);
fd->cq = cq;
return 0;
@ -264,8 +263,14 @@ int hfi1_user_sdma_free_queues(struct hfi1_filedata *fd,
trace_hfi1_sdma_user_free_queues(uctxt->dd, uctxt->ctxt, fd->subctxt);
pq = fd->pq;
spin_lock(&fd->pq_rcu_lock);
pq = srcu_dereference_check(fd->pq, &fd->pq_srcu,
lockdep_is_held(&fd->pq_rcu_lock));
if (pq) {
rcu_assign_pointer(fd->pq, NULL);
spin_unlock(&fd->pq_rcu_lock);
synchronize_srcu(&fd->pq_srcu);
/* at this point there can be no more new requests */
if (pq->handler)
hfi1_mmu_rb_unregister(pq->handler);
iowait_sdma_drain(&pq->busy);
@ -277,7 +282,8 @@ int hfi1_user_sdma_free_queues(struct hfi1_filedata *fd,
kfree(pq->req_in_use);
kmem_cache_destroy(pq->txreq_cache);
kfree(pq);
fd->pq = NULL;
} else {
spin_unlock(&fd->pq_rcu_lock);
}
if (fd->cq) {
vfree(fd->cq->comps);
@ -321,7 +327,8 @@ int hfi1_user_sdma_process_request(struct hfi1_filedata *fd,
{
int ret = 0, i;
struct hfi1_ctxtdata *uctxt = fd->uctxt;
struct hfi1_user_sdma_pkt_q *pq = fd->pq;
struct hfi1_user_sdma_pkt_q *pq =
srcu_dereference(fd->pq, &fd->pq_srcu);
struct hfi1_user_sdma_comp_q *cq = fd->cq;
struct hfi1_devdata *dd = pq->dd;
unsigned long idx = 0;
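Editor's note: taken together, the hfi1 hunks make fd->pq an SRCU-protected pointer: hfi1_write_iter reads it inside srcu_read_lock()/srcu_read_unlock(), while the free path unpublishes it under pq_rcu_lock and then calls synchronize_srcu() before tearing the queue down, so no reader can still hold the old pointer. A rough userspace analogue using liburcu, assuming liburcu is installed (struct and variable names are stand-ins):

/* cc srcu_demo.c -lurcu   (userspace RCU standing in for kernel SRCU;
 * flavor defaults vary by liburcu version) */
#include <stdio.h>
#include <stdlib.h>
#include <urcu.h>

struct pkt_q { int n_reqs; };

static struct pkt_q *pq_ptr;   /* plays the role of fd->pq */

static void reader(void)
{
    rcu_read_lock();                          /* ~ srcu_read_lock(&fd->pq_srcu) */
    struct pkt_q *pq = rcu_dereference(pq_ptr);
    if (pq)
        printf("reqs: %d\n", pq->n_reqs);
    rcu_read_unlock();                        /* ~ srcu_read_unlock(...) */
}

int main(void)
{
    rcu_register_thread();

    struct pkt_q *pq = calloc(1, sizeof(*pq));
    rcu_assign_pointer(pq_ptr, pq);
    reader();

    /* Teardown: unpublish, wait out readers, then free. */
    rcu_assign_pointer(pq_ptr, NULL);
    synchronize_rcu();                        /* ~ synchronize_srcu(&fd->pq_srcu) */
    free(pq);

    rcu_unregister_thread();
    return 0;
}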

View File

@ -408,7 +408,7 @@ struct rxe_dev {
struct list_head pending_mmaps;
spinlock_t mmap_offset_lock; /* guard mmap_offset */
int mmap_offset;
u64 mmap_offset;
atomic64_t stats_counters[RXE_NUM_OF_COUNTERS];

View File

@ -2582,6 +2582,17 @@ isert_wait4logout(struct isert_conn *isert_conn)
}
}
static void
isert_wait4cmds(struct iscsi_conn *conn)
{
isert_info("iscsi_conn %p\n", conn);
if (conn->sess) {
target_sess_cmd_list_set_waiting(conn->sess->se_sess);
target_wait_for_sess_cmds(conn->sess->se_sess);
}
}
/**
* isert_put_unsol_pending_cmds() - Drop commands waiting for
* unsolicitate dataout
@ -2629,6 +2640,7 @@ static void isert_wait_conn(struct iscsi_conn *conn)
ib_drain_qp(isert_conn->qp);
isert_put_unsol_pending_cmds(conn);
isert_wait4cmds(conn);
isert_wait4logout(isert_conn);
queue_work(isert_release_wq, &isert_conn->release_work);

View File

@ -149,7 +149,6 @@ static const char * const topbuttonpad_pnp_ids[] = {
"LEN0042", /* Yoga */
"LEN0045",
"LEN0047",
"LEN0049",
"LEN2000", /* S540 */
"LEN2001", /* Edge E431 */
"LEN2002", /* Edge E531 */
@ -169,9 +168,11 @@ static const char * const smbus_pnp_ids[] = {
/* all of the topbuttonpad_pnp_ids are valid, we just add some extras */
"LEN0048", /* X1 Carbon 3 */
"LEN0046", /* X250 */
"LEN0049", /* Yoga 11e */
"LEN004a", /* W541 */
"LEN005b", /* P50 */
"LEN005e", /* T560 */
"LEN006c", /* T470s */
"LEN0071", /* T480 */
"LEN0072", /* X1 Carbon Gen 5 (2017) - Elan/ALPS trackpoint */
"LEN0073", /* X1 Carbon G5 (Elantech) */
@ -182,6 +183,7 @@ static const char * const smbus_pnp_ids[] = {
"LEN0097", /* X280 -> ALPS trackpoint */
"LEN009b", /* T580 */
"LEN200f", /* T450s */
"LEN2044", /* L470 */
"LEN2054", /* E480 */
"LEN2055", /* E580 */
"SYN3052", /* HP EliteBook 840 G4 */

View File

@ -888,6 +888,7 @@ static int edt_ft5x06_ts_probe(struct i2c_client *client,
{
const struct edt_i2c_chip_data *chip_data;
struct edt_ft5x06_ts_data *tsdata;
u8 buf[2] = { 0xfc, 0x00 };
struct input_dev *input;
unsigned long irq_flags;
int error;
@ -957,6 +958,12 @@ static int edt_ft5x06_ts_probe(struct i2c_client *client,
return error;
}
/*
* Dummy read access. EP0700MLP1 returns bogus data on the first
* register read access and ignores writes.
*/
edt_ft5x06_ts_readwrite(tsdata->client, 2, buf, 2, buf);
edt_ft5x06_ts_set_regs(tsdata);
edt_ft5x06_ts_get_defaults(&client->dev, tsdata);
edt_ft5x06_ts_get_parameters(tsdata);

View File

@ -1145,7 +1145,8 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_device *smmu, u32 sid,
}
arm_smmu_sync_ste_for_sid(smmu, sid);
dst[0] = cpu_to_le64(val);
/* See comment in arm_smmu_write_ctx_desc() */
WRITE_ONCE(dst[0], cpu_to_le64(val));
arm_smmu_sync_ste_for_sid(smmu, sid);
/* It's likely that we'll want to use the new STE soon */
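Editor's note: the SMMU fix replaces a plain 64-bit store to a live, device-visible descriptor with WRITE_ONCE(), preventing the compiler from tearing or re-issuing the store while the hardware may be reading it concurrently. The userspace C11 equivalent is a relaxed atomic store; a sketch (the descriptor word is invented):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* A plain "word = val" may legally be split into two 32-bit stores;
 * a relaxed atomic store may not. */
static _Atomic uint64_t ste_word0;

static void publish(uint64_t val)
{
    atomic_store_explicit(&ste_word0, val, memory_order_relaxed); /* ~ WRITE_ONCE */
}

int main(void)
{
    publish(0x1234567890abcdefULL);
    printf("0x%llx\n", (unsigned long long)
           atomic_load_explicit(&ste_word0, memory_order_relaxed));
    return 0;
}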

View File

@ -327,21 +327,19 @@ static void qcom_iommu_domain_free(struct iommu_domain *domain)
{
struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);
if (WARN_ON(qcom_domain->iommu)) /* forgot to detach? */
return;
iommu_put_dma_cookie(domain);
/* NOTE: unmap can be called after client device is powered off,
* for example, with GPUs or anything involving dma-buf. So we
* cannot rely on the device_link. Make sure the IOMMU is on to
* avoid unclocked accesses in the TLB inv path:
if (qcom_domain->iommu) {
/*
* NOTE: unmap can be called after client device is powered
* off, for example, with GPUs or anything involving dma-buf.
* So we cannot rely on the device_link. Make sure the IOMMU
* is on to avoid unclocked accesses in the TLB inv path:
*/
pm_runtime_get_sync(qcom_domain->iommu->dev);
free_io_pgtable_ops(qcom_domain->pgtbl_ops);
pm_runtime_put_sync(qcom_domain->iommu->dev);
}
kfree(qcom_domain);
}
@ -386,7 +384,7 @@ static void qcom_iommu_detach_dev(struct iommu_domain *domain, struct device *de
struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);
unsigned i;
if (!qcom_domain->iommu)
if (WARN_ON(!qcom_domain->iommu))
return;
pm_runtime_get_sync(qcom_iommu->dev);
@ -397,8 +395,6 @@ static void qcom_iommu_detach_dev(struct iommu_domain *domain, struct device *de
iommu_writel(ctx, ARM_SMMU_CB_SCTLR, 0);
}
pm_runtime_put_sync(qcom_iommu->dev);
qcom_domain->iommu = NULL;
}
static int qcom_iommu_map(struct iommu_domain *domain, unsigned long iova,

View File

@ -527,7 +527,7 @@ static struct its_collection *its_build_invall_cmd(struct its_cmd_block *cmd,
struct its_cmd_desc *desc)
{
its_encode_cmd(cmd, GITS_CMD_INVALL);
its_encode_collection(cmd, desc->its_mapc_cmd.col->col_id);
its_encode_collection(cmd, desc->its_invall_cmd.col->col_id);
its_fixup_cmd(cmd);

View File

@ -1253,6 +1253,7 @@ static struct
struct redist_region *redist_regs;
u32 nr_redist_regions;
bool single_redist;
int enabled_rdists;
u32 maint_irq;
int maint_irq_mode;
phys_addr_t vcpu_base;
@ -1347,8 +1348,10 @@ static int __init gic_acpi_match_gicc(struct acpi_subtable_header *header,
* If GICC is enabled and has valid gicr base address, then it means
* GICR base is presented via GICC
*/
if ((gicc->flags & ACPI_MADT_ENABLED) && gicc->gicr_base_address)
if ((gicc->flags & ACPI_MADT_ENABLED) && gicc->gicr_base_address) {
acpi_data.enabled_rdists++;
return 0;
}
/*
* It's perfectly valid firmware can pass disabled GICC entry, driver
@ -1378,8 +1381,10 @@ static int __init gic_acpi_count_gicr_regions(void)
count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT,
gic_acpi_match_gicc, 0);
if (count > 0)
if (count > 0) {
acpi_data.single_redist = true;
count = acpi_data.enabled_rdists;
}
return count;
}

View File

@ -381,6 +381,7 @@ static struct platform_driver mbigen_platform_driver = {
.name = "Hisilicon MBIGEN-V2",
.of_match_table = mbigen_of_match,
.acpi_match_table = ACPI_PTR(mbigen_acpi_match),
.suppress_bind_attrs = true,
},
.probe = mbigen_device_probe,
};

View File

@ -43,6 +43,8 @@
#define PCA963X_LED_PWM 0x2 /* Controlled through PWM */
#define PCA963X_LED_GRP_PWM 0x3 /* Controlled through PWM/GRPPWM */
#define PCA963X_MODE2_OUTDRV 0x04 /* Open-drain or totem pole */
#define PCA963X_MODE2_INVRT 0x10 /* Normal or inverted direction */
#define PCA963X_MODE2_DMBLNK 0x20 /* Enable blinking */
#define PCA963X_MODE1 0x00
@ -462,12 +464,12 @@ static int pca963x_probe(struct i2c_client *client,
PCA963X_MODE2);
/* Configure output: open-drain or totem pole (push-pull) */
if (pdata->outdrv == PCA963X_OPEN_DRAIN)
mode2 |= 0x01;
mode2 &= ~PCA963X_MODE2_OUTDRV;
else
mode2 |= 0x05;
mode2 |= PCA963X_MODE2_OUTDRV;
/* Configure direction: normal or inverted */
if (pdata->dir == PCA963X_INVERTED)
mode2 |= 0x10;
mode2 |= PCA963X_MODE2_INVRT;
i2c_smbus_write_byte_data(pca963x->chip->client, PCA963X_MODE2,
mode2);
}
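Editor's note: the LED hunk swaps magic numbers for named MODE2 bits, and the open-drain case now clears OUTDRV rather than ORing in 0x01 (the old 0x01/0x05 constants apparently also set bits outside OUTDRV). A compact, runnable illustration of composing the register value with the named masks from the hunk:

#include <stdint.h>
#include <stdio.h>

#define PCA963X_MODE2_OUTDRV 0x04 /* open-drain (0) or totem pole (1) */
#define PCA963X_MODE2_INVRT  0x10 /* normal (0) or inverted (1) direction */

int main(void)
{
    uint8_t mode2 = 0x00;            /* value read back from MODE2 */
    int open_drain = 1, inverted = 1;

    if (open_drain)
        mode2 &= ~PCA963X_MODE2_OUTDRV;
    else
        mode2 |= PCA963X_MODE2_OUTDRV;

    if (inverted)
        mode2 |= PCA963X_MODE2_INVRT;

    printf("MODE2 = 0x%02x\n", mode2);   /* 0x10 here */
    return 0;
}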

View File

@ -381,7 +381,8 @@ void bch_btree_keys_stats(struct btree_keys *, struct bset_stats *);
/* Bkey utility code */
#define bset_bkey_last(i) bkey_idx((struct bkey *) (i)->d, (i)->keys)
#define bset_bkey_last(i) bkey_idx((struct bkey *) (i)->d, \
(unsigned int)(i)->keys)
static inline struct bkey *bset_bkey_idx(struct bset *i, unsigned idx)
{

View File

@ -423,10 +423,12 @@ static int mt9v032_enum_mbus_code(struct v4l2_subdev *subdev,
struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_mbus_code_enum *code)
{
struct mt9v032 *mt9v032 = to_mt9v032(subdev);
if (code->index > 0)
return -EINVAL;
code->code = MEDIA_BUS_FMT_SGRBG10_1X10;
code->code = mt9v032->format.code;
return 0;
}
@ -434,7 +436,11 @@ static int mt9v032_enum_frame_size(struct v4l2_subdev *subdev,
struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_frame_size_enum *fse)
{
if (fse->index >= 3 || fse->code != MEDIA_BUS_FMT_SGRBG10_1X10)
struct mt9v032 *mt9v032 = to_mt9v032(subdev);
if (fse->index >= 3)
return -EINVAL;
if (mt9v032->format.code != fse->code)
return -EINVAL;
fse->min_width = MT9V032_WINDOW_WIDTH_DEF / (1 << fse->index);

View File

@ -14,8 +14,8 @@
#define MAX_SRC_WIDTH 2048
/* Reset & boot poll config */
#define POLL_RST_MAX 50
#define POLL_RST_DELAY_MS 20
#define POLL_RST_MAX 500
#define POLL_RST_DELAY_MS 2
enum bdisp_target_plan {
BDISP_RGB,
@ -382,7 +382,7 @@ int bdisp_hw_reset(struct bdisp_dev *bdisp)
for (i = 0; i < POLL_RST_MAX; i++) {
if (readl(bdisp->regs + BLT_STA1) & BLT_STA1_IDLE)
break;
msleep(POLL_RST_DELAY_MS);
udelay(POLL_RST_DELAY_MS * 1000);
}
if (i == POLL_RST_MAX)
dev_err(bdisp->dev, "Reset timeout\n");
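Editor's note: the rescaled constants keep the same worst-case budget (50 × 20 ms = 500 × 2 ms = 1 s) while switching from msleep() to udelay(), presumably so the reset poll can run in atomic context where sleeping is not allowed. The bounded-poll idiom itself, in a runnable sketch with a faked status register:

#include <stdio.h>
#include <time.h>

#define POLL_RST_MAX      500
#define POLL_RST_DELAY_MS 2     /* 500 * 2 ms == the old 50 * 20 ms budget */

static int read_status(int i) { return i >= 7; } /* pretend HW goes idle on poll 7 */

int main(void)
{
    int i;

    for (i = 0; i < POLL_RST_MAX; i++) {
        if (read_status(i))
            break;
        nanosleep(&(struct timespec){ .tv_nsec = POLL_RST_DELAY_MS * 1000000L }, NULL);
    }
    if (i == POLL_RST_MAX)
        fprintf(stderr, "Reset timeout\n");
    else
        printf("idle after %d polls\n", i);
    return 0;
}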

View File

@ -1972,10 +1972,10 @@ static int enic_stop(struct net_device *netdev)
napi_disable(&enic->napi[i]);
netif_carrier_off(netdev);
netif_tx_disable(netdev);
if (vnic_dev_get_intr_mode(enic->vdev) == VNIC_DEV_INTR_MODE_MSIX)
for (i = 0; i < enic->wq_count; i++)
napi_disable(&enic->napi[enic_cq_wq(enic, i)]);
netif_tx_disable(netdev);
if (!enic_is_dynamic(enic) && !enic_is_sriov_vf(enic))
enic_dev_del_station_addr(enic);

View File

@ -2685,13 +2685,17 @@ static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
skb_dirtytx = tx_queue->skb_dirtytx;
while ((skb = tx_queue->tx_skbuff[skb_dirtytx])) {
bool do_tstamp;
do_tstamp = (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
priv->hwts_tx_en;
frags = skb_shinfo(skb)->nr_frags;
/* When time stamping, one additional TxBD must be freed.
* Also, we need to dma_unmap_single() the TxPAL.
*/
if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))
if (unlikely(do_tstamp))
nr_txbds = frags + 2;
else
nr_txbds = frags + 1;
@ -2705,7 +2709,7 @@ static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
(lstatus & BD_LENGTH_MASK))
break;
if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
if (unlikely(do_tstamp)) {
next = next_txbd(bdp, base, tx_ring_size);
buflen = be16_to_cpu(next->length) +
GMAC_FCB_LEN + GMAC_TXPAL_LEN;
@ -2715,7 +2719,7 @@ static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
dma_unmap_single(priv->dev, be32_to_cpu(bdp->bufPtr),
buflen, DMA_TO_DEVICE);
if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
if (unlikely(do_tstamp)) {
struct skb_shared_hwtstamps shhwtstamps;
u64 *ns = (u64 *)(((uintptr_t)skb->data + 0x10) &
~0x7UL);
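Editor's note: the gianfar change computes do_tstamp once, from SKBTX_HW_TSTAMP plus the driver's own hwts_tx_en, instead of testing SKBTX_IN_PROGRESS at each step: that flag can be set by another timestamping layer (a stacked DSA or PHY driver, for instance), which would make this cleanup path assume an extra TxBD and TxPAL it never attached. The decide-once pattern in miniature (flag values and structs are stand-ins):

#include <stdbool.h>
#include <stdio.h>

#define SKBTX_HW_TSTAMP    0x1
#define SKBTX_IN_PROGRESS  0x4   /* may be set by some other timestamper */

struct skb { unsigned tx_flags; };

int main(void)
{
    /* A DSA switch driver marked this skb in-progress; this device did not. */
    struct skb skb = { .tx_flags = SKBTX_IN_PROGRESS };
    bool hwts_tx_en = false;     /* this device's HW timestamping is off */

    /* Decide once; every later branch uses the same answer. */
    bool do_tstamp = (skb.tx_flags & SKBTX_HW_TSTAMP) && hwts_tx_en;

    unsigned frags = 2;
    unsigned nr_txbds = do_tstamp ? frags + 2 : frags + 1;
    printf("free %u BDs (tstamp=%d)\n", nr_txbds, do_tstamp);  /* 3, not 4 */
    return 0;
}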

View File

@ -225,7 +225,7 @@ mlxsw_sp_dpipe_table_erif_entries_dump(void *priv, bool counters_enabled,
start_again:
err = devlink_dpipe_entry_ctx_prepare(dump_ctx);
if (err)
return err;
goto err_ctx_prepare;
j = 0;
for (; i < rif_count; i++) {
struct mlxsw_sp_rif *rif = mlxsw_sp_rif_by_index(mlxsw_sp, i);
@ -257,6 +257,7 @@ start_again:
return 0;
err_entry_append:
err_entry_get:
err_ctx_prepare:
rtnl_unlock();
devlink_dpipe_entry_clear(&entry);
return err;

View File

@ -240,6 +240,11 @@ static int uhdlc_init(struct ucc_hdlc_private *priv)
ret = -ENOMEM;
goto free_riptr;
}
if (riptr != (u16)riptr || tiptr != (u16)tiptr) {
dev_err(priv->dev, "MURAM allocation out of addressable range\n");
ret = -ENOMEM;
goto free_tiptr;
}
/* Set RIPTR, TIPTR */
iowrite16be(riptr, &priv->ucc_pram->riptr);
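Editor's note: the new guard catches MURAM offsets that would not survive the narrowing write into the 16-bit RIPTR/TIPTR registers: riptr != (u16)riptr is true exactly when bits above bit 15 are set. A runnable illustration:

#include <stdint.h>
#include <stdio.h>

typedef uint16_t u16;

static int fits_u16(unsigned long v)
{
    return v == (u16)v;   /* false as soon as bits 16+ are set */
}

int main(void)
{
    printf("0x0fff:  %d\n", fits_u16(0x0fffUL));   /* 1: representable */
    printf("0x1fffe: %d\n", fits_u16(0x1fffeUL));  /* 0: would be silently truncated */
    return 0;
}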

View File

@ -261,7 +261,7 @@ struct port {
struct hss_plat_info *plat;
buffer_t *rx_buff_tab[RX_DESCS], *tx_buff_tab[TX_DESCS];
struct desc *desc_tab; /* coherent */
u32 desc_tab_phys;
dma_addr_t desc_tab_phys;
unsigned int id;
unsigned int clock_type, clock_rate, loopback;
unsigned int initialized, carrier;
@ -861,7 +861,7 @@ static int hss_hdlc_xmit(struct sk_buff *skb, struct net_device *dev)
dev->stats.tx_dropped++;
return NETDEV_TX_OK;
}
memcpy_swab32(mem, (u32 *)((int)skb->data & ~3), bytes / 4);
memcpy_swab32(mem, (u32 *)((uintptr_t)skb->data & ~3), bytes / 4);
dev_kfree_skb(skb);
#endif
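Editor's note: both hss hunks are 64-bit cleanliness fixes: the DMA address moves from u32 to dma_addr_t, and the alignment mask on skb->data goes through uintptr_t instead of int, since casting a 64-bit pointer to int truncates it and is implementation-defined. The portable masking idiom, runnable:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    char buf[32];
    char *p = buf + 5;

    /* Round the pointer down to a 4-byte boundary via uintptr_t, which is
     * guaranteed to round-trip a pointer; int is not. */
    uint32_t *aligned = (uint32_t *)((uintptr_t)p & ~(uintptr_t)3);

    printf("%p -> %p\n", (void *)p, (void *)aligned);
    return 0;
}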

Some files were not shown because too many files have changed in this diff.