mirror of git://git.yoctoproject.org/linux-yocto.git

commit cdd2f14e26

Merge tag 'v5.2.59' into v5.2/standard/base

This is the 5.2.59 stable release
@@ -1563,7 +1563,8 @@ What: /sys/bus/iio/devices/iio:deviceX/in_concentrationX_voc_raw
 KernelVersion:	4.3
 Contact:	linux-iio@vger.kernel.org
 Description:
-		Raw (unscaled no offset etc.) percentage reading of a substance.
+		Raw (unscaled no offset etc.) reading of a substance. Units
+		after application of scale and offset are percents.

 What:		/sys/bus/iio/devices/iio:deviceX/in_resistance_raw
 What:		/sys/bus/iio/devices/iio:deviceX/in_resistanceX_raw
 Makefile | 2 +-

@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 5
 PATCHLEVEL = 2
-SUBLEVEL = 58
+SUBLEVEL = 59
 EXTRAVERSION =
 NAME = Bobtail Squid
@@ -339,7 +339,7 @@
 		reg = <0x20>;
 		remote = <&vin1>;

-		port {
+		ports {
 			#address-cells = <1>;
 			#size-cells = <0>;

@@ -399,7 +399,7 @@
 		interrupts = <2 IRQ_TYPE_LEVEL_LOW>;
 		default-input = <0>;

-		port {
+		ports {
 			#address-cells = <1>;
 			#size-cells = <0>;

@@ -16,16 +16,28 @@
 		regulator-type = "voltage";
 		regulator-boot-on;
 		regulator-always-on;
-		regulator-min-microvolt = <1100000>;
-		regulator-max-microvolt = <1300000>;
+		regulator-min-microvolt = <1108475>;
+		regulator-max-microvolt = <1308475>;
 		regulator-ramp-delay = <50>; /* 4ms */
 		gpios = <&r_pio 0 1 GPIO_ACTIVE_HIGH>; /* PL1 */
 		gpios-states = <0x1>;
-		states = <1100000 0x0
-			  1300000 0x1>;
+		states = <1108475 0x0
+			  1308475 0x1>;
 	};
 };

+&cpu0 {
+	cpu-supply = <&reg_vdd_cpux>;
+};
+
+&cpu1 {
+	cpu-supply = <&reg_vdd_cpux>;
+};
+
+&cpu2 {
+	cpu-supply = <&reg_vdd_cpux>;
+};
+
+&cpu3 {
+	cpu-supply = <&reg_vdd_cpux>;
+};
@@ -22,6 +22,19 @@
  * A simple function epilogue looks like this:
  *	ldm	sp, {fp, sp, pc}
  *
+ * When compiled with clang, pc and sp are not pushed. A simple function
+ * prologue looks like this when built with clang:
+ *
+ *	stmdb	{..., fp, lr}
+ *	add	fp, sp, #x
+ *	sub	sp, sp, #y
+ *
+ * A simple function epilogue looks like this when built with clang:
+ *
+ *	sub	sp, fp, #x
+ *	ldm	{..., fp, pc}
+ *
+ *
  * Note that with framepointer enabled, even the leaf functions have the same
  * prologue and epilogue, therefore we can ignore the LR value in this case.
  */

@@ -34,6 +47,16 @@ int notrace unwind_frame(struct stackframe *frame)
	low = frame->sp;
	high = ALIGN(low, THREAD_SIZE);

+#ifdef CONFIG_CC_IS_CLANG
+	/* check current frame pointer is within bounds */
+	if (fp < low + 4 || fp > high - 4)
+		return -EINVAL;
+
+	frame->sp = frame->fp;
+	frame->fp = *(unsigned long *)(fp);
+	frame->pc = frame->lr;
+	frame->lr = *(unsigned long *)(fp + 4);
+#else
	/* check current frame pointer is within bounds */
	if (fp < low + 12 || fp > high - 4)
		return -EINVAL;

@@ -42,6 +65,7 @@ int notrace unwind_frame(struct stackframe *frame)
	frame->fp = *(unsigned long *)(fp - 12);
	frame->sp = *(unsigned long *)(fp - 8);
	frame->pc = *(unsigned long *)(fp - 4);
+#endif

	return 0;
 }
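The comment block above describes the clang frame layout this unwinder now handles: the caller's fp sits at *(fp) and the caller's lr at *(fp + 4). Below is a minimal userspace sketch of one clang-style unwind step — not the kernel code; the simulated stack contents and addresses are invented for illustration.

#include <stdio.h>

/* Simulated register state for one unwind step. */
struct stackframe {
	unsigned long fp, sp, lr, pc;
};

/* One step of the clang-layout walk: read caller fp and lr through
 * the (fake, word-indexed) stack memory "mem". */
static void unwind_step_clang(struct stackframe *frame,
			      const unsigned long *mem)
{
	unsigned long fp = frame->fp;

	frame->sp = frame->fp;
	frame->fp = mem[fp / 4];	/* *(fp)     : caller's fp */
	frame->pc = frame->lr;		/* return address of current frame */
	frame->lr = mem[fp / 4 + 1];	/* *(fp + 4) : caller's lr */
}

int main(void)
{
	/* Fake 32-bit stack: word 4 holds the caller fp, word 5 the caller lr. */
	unsigned long mem[8] = { [4] = 24, [5] = 0x80001234 };
	struct stackframe f = { .fp = 16, .lr = 0x80005678 };

	unwind_step_clang(&f, mem);
	printf("pc=%#lx fp=%lu lr=%#lx\n", f.pc, f.fp, f.lr);
	return 0;
}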
@@ -591,13 +591,13 @@ static void __init at91_pm_sram_init(void)
	sram_pool = gen_pool_get(&pdev->dev, NULL);
	if (!sram_pool) {
		pr_warn("%s: sram pool unavailable!\n", __func__);
-		return;
+		goto out_put_device;
	}

	sram_base = gen_pool_alloc(sram_pool, at91_pm_suspend_in_sram_sz);
	if (!sram_base) {
		pr_warn("%s: unable to alloc sram!\n", __func__);
-		return;
+		goto out_put_device;
	}

	sram_pbase = gen_pool_virt_to_phys(sram_pool, sram_base);

@@ -605,12 +605,17 @@ static void __init at91_pm_sram_init(void)
					at91_pm_suspend_in_sram_sz, false);
	if (!at91_suspend_sram_fn) {
		pr_warn("SRAM: Could not map\n");
-		return;
+		goto out_put_device;
	}

	/* Copy the pm suspend handler to SRAM */
	at91_suspend_sram_fn = fncpy(at91_suspend_sram_fn,
			&at91_pm_suspend_in_sram, at91_pm_suspend_in_sram_sz);
+	return;
+
+out_put_device:
+	put_device(&pdev->dev);
+	return;
 }

 static bool __init at91_is_pm_mode_active(int pm_mode)
@@ -26,6 +26,7 @@
 #define EXYNOS5420_USE_L2_COMMON_UP_STATE	BIT(30)

 static void __iomem *ns_sram_base_addr __ro_after_init;
+static bool secure_firmware __ro_after_init;

 /*
  * The common v7_exit_coherency_flush API could not be used because of the

@@ -58,15 +59,16 @@ static void __iomem *ns_sram_base_addr __ro_after_init;
 static int exynos_cpu_powerup(unsigned int cpu, unsigned int cluster)
 {
	unsigned int cpunr = cpu + (cluster * EXYNOS5420_CPUS_PER_CLUSTER);
+	bool state;

	pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
	if (cpu >= EXYNOS5420_CPUS_PER_CLUSTER ||
		cluster >= EXYNOS5420_NR_CLUSTERS)
		return -EINVAL;

-	if (!exynos_cpu_power_state(cpunr)) {
-		exynos_cpu_power_up(cpunr);
-
+	state = exynos_cpu_power_state(cpunr);
+	exynos_cpu_power_up(cpunr);
+	if (!state && secure_firmware) {
		/*
		 * This assumes the cluster number of the big cores(Cortex A15)
		 * is 0 and the Little cores(Cortex A7) is 1.

@@ -258,6 +260,8 @@ static int __init exynos_mcpm_init(void)
		return -ENOMEM;
	}

+	secure_firmware = exynos_secure_firmware_available();
+
	/*
	 * To increase the stability of KFC reset we need to program
	 * the PMU SPARE3 register
@@ -49,14 +49,14 @@ static int socfpga_setup_ocram_self_refresh(void)
	if (!ocram_pool) {
		pr_warn("%s: ocram pool unavailable!\n", __func__);
		ret = -ENODEV;
-		goto put_node;
+		goto put_device;
	}

	ocram_base = gen_pool_alloc(ocram_pool, socfpga_sdram_self_refresh_sz);
	if (!ocram_base) {
		pr_warn("%s: unable to alloc ocram!\n", __func__);
		ret = -ENOMEM;
-		goto put_node;
+		goto put_device;
	}

	ocram_pbase = gen_pool_virt_to_phys(ocram_pool, ocram_base);

@@ -67,7 +67,7 @@ static int socfpga_setup_ocram_self_refresh(void)
	if (!suspend_ocram_base) {
		pr_warn("%s: __arm_ioremap_exec failed!\n", __func__);
		ret = -ENOMEM;
-		goto put_node;
+		goto put_device;
	}

	/* Copy the code that puts DDR in self refresh to ocram */

@@ -81,6 +81,8 @@ static int socfpga_setup_ocram_self_refresh(void)
	if (!socfpga_sdram_self_refresh_in_ocram)
		ret = -EFAULT;

+put_device:
+	put_device(&pdev->dev);
 put_node:
	of_node_put(np);

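Both the at91 and socfpga hunks above replace early returns with goto labels so the reference taken on the platform device is dropped on every exit path. A reduced stand-alone sketch of the pattern — all names here are invented stand-ins, not kernel APIs:

#include <stdio.h>

static int refcount = 1;
static void fake_get_device(void) { refcount++; }	/* stand-in for get_device() */
static void fake_put_device(void) { refcount--; }	/* stand-in for put_device() */

static int setup(int fail)
{
	int ret = 0;

	fake_get_device();	/* acquired early, must be released on every path */

	if (fail) {
		ret = -1;
		goto put_device;	/* a bare "return ret;" here would leak */
	}

	/* ... normal setup work would go here ... */

put_device:
	fake_put_device();	/* single release point for success and failure */
	return ret;
}

int main(void)
{
	setup(1);
	printf("refcount after failed setup: %d\n", refcount);	/* prints 1 */
	return 0;
}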
@@ -152,6 +152,7 @@
			regulator-min-microvolt = <700000>;
			regulator-max-microvolt = <1150000>;
			regulator-enable-ramp-delay = <125>;
+			regulator-always-on;
		};

		ldo8_reg: LDO8 {
@@ -530,6 +530,17 @@
		status = "ok";
		compatible = "adi,adv7533";
		reg = <0x39>;
+		adi,dsi-lanes = <4>;
+		ports {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			port@0 {
+				reg = <0>;
+			};
+			port@1 {
+				reg = <1>;
+			};
+		};
	};
 };

@@ -516,7 +516,7 @@
			reg = <0x39>;
			interrupt-parent = <&gpio1>;
			interrupts = <1 2>;
-			pd-gpio = <&gpio0 4 0>;
+			pd-gpios = <&gpio0 4 0>;
			adi,dsi-lanes = <4>;
			#sound-dai-cells = <0>;

@@ -508,7 +508,7 @@
			pins = "gpio63", "gpio64", "gpio65", "gpio66",
			       "gpio67", "gpio68";
			drive-strength = <8>;
-			bias-pull-none;
+			bias-disable;
		};
	};
	cdc_pdm_lines_sus: pdm_lines_off {

@@ -537,7 +537,7 @@
			pins = "gpio113", "gpio114", "gpio115",
			       "gpio116";
			drive-strength = <8>;
-			bias-pull-none;
+			bias-disable;
		};
	};

@@ -565,7 +565,7 @@
		pinconf {
			pins = "gpio110";
			drive-strength = <8>;
-			bias-pull-none;
+			bias-disable;
		};
	};

@@ -591,7 +591,7 @@
		pinconf {
			pins = "gpio116";
			drive-strength = <8>;
-			bias-pull-none;
+			bias-disable;
		};
	};
	ext_mclk_tlmm_lines_sus: mclk_lines_off {

@@ -619,7 +619,7 @@
			pins = "gpio112", "gpio117", "gpio118",
			       "gpio119";
			drive-strength = <8>;
-			bias-pull-none;
+			bias-disable;
		};
	};
	ext_sec_tlmm_lines_sus: tlmm_lines_off {
@@ -156,7 +156,7 @@
	pinctrl-0 = <&rgmii_pins>;
	snps,reset-active-low;
	snps,reset-delays-us = <0 10000 50000>;
-	snps,reset-gpio = <&gpio3 RK_PB3 GPIO_ACTIVE_HIGH>;
+	snps,reset-gpio = <&gpio3 RK_PB3 GPIO_ACTIVE_LOW>;
	tx_delay = <0x10>;
	rx_delay = <0x10>;
	status = "okay";
@@ -101,7 +101,7 @@

	vcc5v0_host: vcc5v0-host-regulator {
		compatible = "regulator-fixed";
-		gpio = <&gpio4 RK_PA3 GPIO_ACTIVE_HIGH>;
+		gpio = <&gpio4 RK_PA3 GPIO_ACTIVE_LOW>;
		enable-active-low;
		pinctrl-names = "default";
		pinctrl-0 = <&vcc5v0_host_en>;

@@ -157,7 +157,7 @@
	phy-mode = "rgmii";
	pinctrl-names = "default";
	pinctrl-0 = <&rgmii_pins>;
-	snps,reset-gpio = <&gpio3 RK_PC0 GPIO_ACTIVE_HIGH>;
+	snps,reset-gpio = <&gpio3 RK_PC0 GPIO_ACTIVE_LOW>;
	snps,reset-active-low;
	snps,reset-delays-us = <0 10000 50000>;
	tx_delay = <0x10>;
@@ -183,7 +183,7 @@ static __inline__ void iop_writeb(volatile struct mac_iop *iop, __u16 addr, __u8

 static __inline__ void iop_stop(volatile struct mac_iop *iop)
 {
-	iop->status_ctrl &= ~IOP_RUN;
+	iop->status_ctrl = IOP_AUTOINC;
 }

 static __inline__ void iop_start(volatile struct mac_iop *iop)

@@ -191,14 +191,9 @@ static __inline__ void iop_start(volatile struct mac_iop *iop)
	iop->status_ctrl = IOP_RUN | IOP_AUTOINC;
 }

-static __inline__ void iop_bypass(volatile struct mac_iop *iop)
-{
-	iop->status_ctrl |= IOP_BYPASS;
-}
-
 static __inline__ void iop_interrupt(volatile struct mac_iop *iop)
 {
-	iop->status_ctrl |= IOP_IRQ;
+	iop->status_ctrl = IOP_IRQ | IOP_RUN | IOP_AUTOINC;
 }

 static int iop_alive(volatile struct mac_iop *iop)

@@ -244,7 +239,6 @@ void __init iop_preinit(void)
		} else {
			iop_base[IOP_NUM_SCC] = (struct mac_iop *) SCC_IOP_BASE_QUADRA;
		}
-		iop_base[IOP_NUM_SCC]->status_ctrl = 0x87;
		iop_scc_present = 1;
	} else {
		iop_base[IOP_NUM_SCC] = NULL;

@@ -256,7 +250,7 @@ void __init iop_preinit(void)
		} else {
			iop_base[IOP_NUM_ISM] = (struct mac_iop *) ISM_IOP_BASE_QUADRA;
		}
-		iop_base[IOP_NUM_ISM]->status_ctrl = 0;
+		iop_stop(iop_base[IOP_NUM_ISM]);
		iop_ism_present = 1;
	} else {
		iop_base[IOP_NUM_ISM] = NULL;

@@ -416,7 +410,8 @@ static void iop_handle_send(uint iop_num, uint chan)
	msg->status = IOP_MSGSTATUS_UNUSED;
	msg = msg->next;
	iop_send_queue[iop_num][chan] = msg;
-	if (msg) iop_do_send(msg);
+	if (msg && iop_readb(iop, IOP_ADDR_SEND_STATE + chan) == IOP_MSG_IDLE)
+		iop_do_send(msg);
 }

 /*

@@ -490,16 +485,12 @@ int iop_send_message(uint iop_num, uint chan, void *privdata,

	if (!(q = iop_send_queue[iop_num][chan])) {
		iop_send_queue[iop_num][chan] = msg;
-		iop_do_send(msg);
	} else {
		while (q->next) q = q->next;
		q->next = msg;
	}

	if (iop_readb(iop_base[iop_num],
	    IOP_ADDR_SEND_STATE + chan) == IOP_MSG_IDLE) {
		iop_do_send(msg);
	}

	return 0;
 }

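The two IOP hunks above converge on one rule: a message is only handed to the hardware when the channel reports idle; otherwise it just queues, and the completion handler kicks the next one. A toy model of that discipline — all names invented, no Mac IOP API:

#include <stdio.h>

enum { CHAN_IDLE, CHAN_BUSY };

static int chan_state = CHAN_IDLE;
static int queued, started;

static void do_send(void)
{
	chan_state = CHAN_BUSY;		/* hardware now owns one message */
	started++;
}

static void send_message(void)
{
	queued++;
	if (chan_state == CHAN_IDLE)	/* the fix: test for idle before sending */
		do_send();
}

static void handle_send_complete(void)
{
	chan_state = CHAN_IDLE;
	if (--queued > 0)		/* kick the next queued message, if any */
		do_send();
}

int main(void)
{
	send_message();			/* starts immediately */
	send_message();			/* queues behind the first */
	handle_send_complete();		/* completion kicks the second */
	printf("started=%d\n", started);	/* prints 2 */
	return 0;
}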
@@ -517,6 +517,7 @@ static int __init dwc3_octeon_device_init(void)

		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		if (res == NULL) {
+			put_device(&pdev->dev);
			dev_err(&pdev->dev, "No memory resources\n");
			return -ENXIO;
		}

@@ -528,8 +529,10 @@ static int __init dwc3_octeon_device_init(void)
		 * know the difference.
		 */
		base = devm_ioremap_resource(&pdev->dev, res);
-		if (IS_ERR(base))
+		if (IS_ERR(base)) {
+			put_device(&pdev->dev);
			return PTR_ERR(base);
+		}

		mutex_lock(&dwc3_octeon_clocks_mutex);
		dwc3_octeon_clocks_start(&pdev->dev, (u64)base);
@@ -26,6 +26,67 @@
 #define __smp_rmb()	mb()
 #define __smp_wmb()	mb()

+#define __smp_store_release(p, v)					\
+do {									\
+	typeof(p) __p = (p);						\
+	union { typeof(*p) __val; char __c[1]; } __u =			\
+		{ .__val = (__force typeof(*p)) (v) };			\
+	compiletime_assert_atomic_type(*p);				\
+	switch (sizeof(*p)) {						\
+	case 1:								\
+		asm volatile("stb,ma %0,0(%1)"				\
+				: : "r"(*(__u8 *)__u.__c), "r"(__p)	\
+				: "memory");				\
+		break;							\
+	case 2:								\
+		asm volatile("sth,ma %0,0(%1)"				\
+				: : "r"(*(__u16 *)__u.__c), "r"(__p)	\
+				: "memory");				\
+		break;							\
+	case 4:								\
+		asm volatile("stw,ma %0,0(%1)"				\
+				: : "r"(*(__u32 *)__u.__c), "r"(__p)	\
+				: "memory");				\
+		break;							\
+	case 8:								\
+		if (IS_ENABLED(CONFIG_64BIT))				\
+			asm volatile("std,ma %0,0(%1)"			\
+				: : "r"(*(__u64 *)__u.__c), "r"(__p)	\
+				: "memory");				\
+		break;							\
+	}								\
+} while (0)
+
+#define __smp_load_acquire(p)						\
+({									\
+	union { typeof(*p) __val; char __c[1]; } __u;			\
+	typeof(p) __p = (p);						\
+	compiletime_assert_atomic_type(*p);				\
+	switch (sizeof(*p)) {						\
+	case 1:								\
+		asm volatile("ldb,ma 0(%1),%0"				\
+				: "=r"(*(__u8 *)__u.__c) : "r"(__p)	\
+				: "memory");				\
+		break;							\
+	case 2:								\
+		asm volatile("ldh,ma 0(%1),%0"				\
+				: "=r"(*(__u16 *)__u.__c) : "r"(__p)	\
+				: "memory");				\
+		break;							\
+	case 4:								\
+		asm volatile("ldw,ma 0(%1),%0"				\
+				: "=r"(*(__u32 *)__u.__c) : "r"(__p)	\
+				: "memory");				\
+		break;							\
+	case 8:								\
+		if (IS_ENABLED(CONFIG_64BIT))				\
+			asm volatile("ldd,ma 0(%1),%0"			\
+				: "=r"(*(__u64 *)__u.__c) : "r"(__p)	\
+				: "memory");				\
+		break;							\
+	}								\
+	__u.__val;							\
+})
 #include <asm-generic/barrier.h>

 #endif /* !__ASSEMBLY__ */
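The parisc macros above hand-code what C11 calls a store-release and a load-acquire: the release store publishes all prior writes before the flag, and the acquire load guarantees those writes are visible once the flag is observed. A minimal portable sketch of the same pairing using standard C11 atomics (single-threaded here only to keep the demo runnable; the real use is cross-thread):

#include <stdatomic.h>
#include <stdio.h>

static int data;
static atomic_int ready;

static void producer(void)
{
	data = 42;	/* plain write, ordered before the release store */
	atomic_store_explicit(&ready, 1, memory_order_release);
}

static int consumer(void)
{
	while (!atomic_load_explicit(&ready, memory_order_acquire))
		;	/* spin until published */
	return data;	/* guaranteed to observe 42 */
}

int main(void)
{
	producer();
	printf("%d\n", consumer());
	return 0;
}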
@@ -37,12 +37,8 @@ static inline void arch_spin_unlock(arch_spinlock_t *x)
	volatile unsigned int *a;

	a = __ldcw_align(x);
-#ifdef CONFIG_SMP
-	(void) __ldcw(a);
-#else
-	mb();
-#endif
-	*a = 1;
+	/* Release with ordered store. */
+	__asm__ __volatile__("stw,ma %0,0(%1)" : : "r"(1), "r"(a) : "memory");
 }

 static inline int arch_spin_trylock(arch_spinlock_t *x)
@@ -454,7 +454,6 @@
	nop
	LDREG	0(\ptp),\pte
	bb,<,n	\pte,_PAGE_PRESENT_BIT,3f
-	LDCW	0(\tmp),\tmp1
	b	\fault
	stw	\spc,0(\tmp)
 99:	ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)

@@ -464,23 +463,26 @@
 3:
	.endm

-	/* Release pa_tlb_lock lock without reloading lock address. */
-	.macro		tlb_unlock0	spc,tmp,tmp1
+	/* Release pa_tlb_lock lock without reloading lock address.
+	   Note that the values in the register spc are limited to
+	   NR_SPACE_IDS (262144).  Thus, the stw instruction always
+	   stores a nonzero value even when register spc is 64 bits.
+	   We use an ordered store to ensure all prior accesses are
+	   performed prior to releasing the lock. */
+	.macro		tlb_unlock0	spc,tmp
 #ifdef CONFIG_SMP
 98:	or,COND(=)	%r0,\spc,%r0
-	LDCW		0(\tmp),\tmp1
-	or,COND(=)	%r0,\spc,%r0
-	stw		\spc,0(\tmp)
+	stw,ma		\spc,0(\tmp)
 99:	ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
 #endif
	.endm

	/* Release pa_tlb_lock lock. */
-	.macro		tlb_unlock1	spc,tmp,tmp1
+	.macro		tlb_unlock1	spc,tmp
 #ifdef CONFIG_SMP
 98:	load_pa_tlb_lock \tmp
 99:	ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
-	tlb_unlock0	\spc,\tmp,\tmp1
+	tlb_unlock0	\spc,\tmp
 #endif
	.endm

@@ -1163,7 +1165,7 @@ dtlb_miss_20w:

	idtlbt		pte,prot

-	tlb_unlock1	spc,t0,t1
+	tlb_unlock1	spc,t0
	rfir
	nop

@@ -1189,7 +1191,7 @@ nadtlb_miss_20w:

	idtlbt		pte,prot

-	tlb_unlock1	spc,t0,t1
+	tlb_unlock1	spc,t0
	rfir
	nop

@@ -1223,7 +1225,7 @@ dtlb_miss_11:

	mtsp		t1, %sr1	/* Restore sr1 */

-	tlb_unlock1	spc,t0,t1
+	tlb_unlock1	spc,t0
	rfir
	nop

@@ -1256,7 +1258,7 @@ nadtlb_miss_11:

	mtsp		t1, %sr1	/* Restore sr1 */

-	tlb_unlock1	spc,t0,t1
+	tlb_unlock1	spc,t0
	rfir
	nop

@@ -1285,7 +1287,7 @@ dtlb_miss_20:

	idtlbt		pte,prot

-	tlb_unlock1	spc,t0,t1
+	tlb_unlock1	spc,t0
	rfir
	nop

@@ -1313,7 +1315,7 @@ nadtlb_miss_20:

	idtlbt		pte,prot

-	tlb_unlock1	spc,t0,t1
+	tlb_unlock1	spc,t0
	rfir
	nop

@@ -1420,7 +1422,7 @@ itlb_miss_20w:

	iitlbt		pte,prot

-	tlb_unlock1	spc,t0,t1
+	tlb_unlock1	spc,t0
	rfir
	nop

@@ -1444,7 +1446,7 @@ naitlb_miss_20w:

	iitlbt		pte,prot

-	tlb_unlock1	spc,t0,t1
+	tlb_unlock1	spc,t0
	rfir
	nop

@@ -1478,7 +1480,7 @@ itlb_miss_11:

	mtsp		t1, %sr1	/* Restore sr1 */

-	tlb_unlock1	spc,t0,t1
+	tlb_unlock1	spc,t0
	rfir
	nop

@@ -1502,7 +1504,7 @@ naitlb_miss_11:

	mtsp		t1, %sr1	/* Restore sr1 */

-	tlb_unlock1	spc,t0,t1
+	tlb_unlock1	spc,t0
	rfir
	nop

@@ -1532,7 +1534,7 @@ itlb_miss_20:

	iitlbt		pte,prot

-	tlb_unlock1	spc,t0,t1
+	tlb_unlock1	spc,t0
	rfir
	nop

@@ -1552,7 +1554,7 @@ naitlb_miss_20:

	iitlbt		pte,prot

-	tlb_unlock1	spc,t0,t1
+	tlb_unlock1	spc,t0
	rfir
	nop

@@ -1582,7 +1584,7 @@ dbit_trap_20w:

	idtlbt		pte,prot

-	tlb_unlock0	spc,t0,t1
+	tlb_unlock0	spc,t0
	rfir
	nop
 #else

@@ -1608,7 +1610,7 @@ dbit_trap_11:

	mtsp		t1, %sr1	/* Restore sr1 */

-	tlb_unlock0	spc,t0,t1
+	tlb_unlock0	spc,t0
	rfir
	nop

@@ -1628,7 +1630,7 @@ dbit_trap_20:

	idtlbt		pte,prot

-	tlb_unlock0	spc,t0,t1
+	tlb_unlock0	spc,t0
	rfir
	nop
 #endif
@@ -640,11 +640,7 @@ cas_action:
	sub,<>	%r28, %r25, %r0
 2:	stw	%r24, 0(%r26)
	/* Free lock */
-#ifdef CONFIG_SMP
-98:	LDCW	0(%sr2,%r20), %r1	/* Barrier */
-99:	ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
-#endif
-	stw	%r20, 0(%sr2,%r20)
+	stw,ma	%r20, 0(%sr2,%r20)
 #if ENABLE_LWS_DEBUG
	/* Clear thread register indicator */
	stw	%r0, 4(%sr2,%r20)

@@ -658,11 +654,7 @@ cas_action:
 3:
	/* Error occurred on load or store */
	/* Free lock */
-#ifdef CONFIG_SMP
-98:	LDCW	0(%sr2,%r20), %r1	/* Barrier */
-99:	ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
-#endif
-	stw	%r20, 0(%sr2,%r20)
+	stw,ma	%r20, 0(%sr2,%r20)
 #if ENABLE_LWS_DEBUG
	stw	%r0, 4(%sr2,%r20)
 #endif

@@ -863,11 +855,7 @@ cas2_action:

 cas2_end:
	/* Free lock */
-#ifdef CONFIG_SMP
-98:	LDCW	0(%sr2,%r20), %r1	/* Barrier */
-99:	ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
-#endif
-	stw	%r20, 0(%sr2,%r20)
+	stw,ma	%r20, 0(%sr2,%r20)
	/* Enable interrupts */
	ssm	PSW_SM_I, %r0
	/* Return to userspace, set no error */

@@ -877,11 +865,7 @@ cas2_end:
 22:
	/* Error occurred on load or store */
	/* Free lock */
-#ifdef CONFIG_SMP
-98:	LDCW	0(%sr2,%r20), %r1	/* Barrier */
-99:	ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
-#endif
-	stw	%r20, 0(%sr2,%r20)
+	stw,ma	%r20, 0(%sr2,%r20)
	ssm	PSW_SM_I, %r0
	ldo	1(%r0),%r28
	b	lws_exit
@@ -122,7 +122,7 @@ src-wlib-y := string.S crt0.S stdio.c decompress.c main.c \
		elf_util.c $(zlib-y) devtree.c stdlib.c \
		oflib.c ofconsole.c cuboot.c

-src-wlib-$(CONFIG_PPC_MPC52XX) += mpc52xx-psc.c
+src-wlib-$(CONFIG_PPC_MPC52xx) += mpc52xx-psc.c
 src-wlib-$(CONFIG_PPC64_BOOT_WRAPPER) += opal-calls.S opal.c
 ifndef CONFIG_PPC64_BOOT_WRAPPER
 src-wlib-y += crtsavres.S
@@ -129,7 +129,7 @@ int serial_console_init(void)
		 dt_is_compatible(devp, "fsl,cpm2-smc-uart"))
		rc = cpm_console_init(devp, &serial_cd);
 #endif
-#ifdef CONFIG_PPC_MPC52XX
+#ifdef CONFIG_PPC_MPC52xx
	else if (dt_is_compatible(devp, "fsl,mpc5200-psc-uart"))
		rc = mpc5200_psc_console_init(devp, &serial_cd);
 #endif
@@ -12,6 +12,8 @@

 #ifdef CONFIG_PPC_PERF_CTRS
 #include <asm/perf_event_server.h>
+#else
+static inline bool is_sier_available(void) { return false; }
 #endif

 #ifdef CONFIG_FSL_EMB_PERF_EVENT
@@ -368,8 +368,6 @@ extern int rtas_set_indicator_fast(int indicator, int index, int new_value);
 extern void rtas_progress(char *s, unsigned short hex);
 extern int rtas_suspend_cpu(struct rtas_suspend_me_data *data);
 extern int rtas_suspend_last_cpu(struct rtas_suspend_me_data *data);
-extern int rtas_online_cpus_mask(cpumask_var_t cpus);
-extern int rtas_offline_cpus_mask(cpumask_var_t cpus);
 extern int rtas_ibm_suspend_me(u64 handle);

 struct rtc_time;
@@ -841,97 +841,6 @@ static void rtas_percpu_suspend_me(void *info)
	__rtas_suspend_cpu((struct rtas_suspend_me_data *)info, 1);
 }

-enum rtas_cpu_state {
-	DOWN,
-	UP,
-};
-
-#ifndef CONFIG_SMP
-static int rtas_cpu_state_change_mask(enum rtas_cpu_state state,
-				cpumask_var_t cpus)
-{
-	if (!cpumask_empty(cpus)) {
-		cpumask_clear(cpus);
-		return -EINVAL;
-	} else
-		return 0;
-}
-#else
-/* On return cpumask will be altered to indicate CPUs changed.
- * CPUs with states changed will be set in the mask,
- * CPUs with status unchanged will be unset in the mask. */
-static int rtas_cpu_state_change_mask(enum rtas_cpu_state state,
-				cpumask_var_t cpus)
-{
-	int cpu;
-	int cpuret = 0;
-	int ret = 0;
-
-	if (cpumask_empty(cpus))
-		return 0;
-
-	for_each_cpu(cpu, cpus) {
-		struct device *dev = get_cpu_device(cpu);
-
-		switch (state) {
-		case DOWN:
-			cpuret = device_offline(dev);
-			break;
-		case UP:
-			cpuret = device_online(dev);
-			break;
-		}
-		if (cpuret < 0) {
-			pr_debug("%s: cpu_%s for cpu#%d returned %d.\n",
-					__func__,
-					((state == UP) ? "up" : "down"),
-					cpu, cpuret);
-			if (!ret)
-				ret = cpuret;
-			if (state == UP) {
-				/* clear bits for unchanged cpus, return */
-				cpumask_shift_right(cpus, cpus, cpu);
-				cpumask_shift_left(cpus, cpus, cpu);
-				break;
-			} else {
-				/* clear bit for unchanged cpu, continue */
-				cpumask_clear_cpu(cpu, cpus);
-			}
-		}
-	}
-
-	return ret;
-}
-#endif
-
-int rtas_online_cpus_mask(cpumask_var_t cpus)
-{
-	int ret;
-
-	ret = rtas_cpu_state_change_mask(UP, cpus);
-
-	if (ret) {
-		cpumask_var_t tmp_mask;
-
-		if (!alloc_cpumask_var(&tmp_mask, GFP_KERNEL))
-			return ret;
-
-		/* Use tmp_mask to preserve cpus mask from first failure */
-		cpumask_copy(tmp_mask, cpus);
-		rtas_offline_cpus_mask(tmp_mask);
-		free_cpumask_var(tmp_mask);
-	}
-
-	return ret;
-}
-EXPORT_SYMBOL(rtas_online_cpus_mask);
-
-int rtas_offline_cpus_mask(cpumask_var_t cpus)
-{
-	return rtas_cpu_state_change_mask(DOWN, cpus);
-}
-EXPORT_SYMBOL(rtas_offline_cpus_mask);
-
 int rtas_ibm_suspend_me(u64 handle)
 {
	long state;

@@ -939,8 +848,6 @@ int rtas_ibm_suspend_me(u64 handle)
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	struct rtas_suspend_me_data data;
	DECLARE_COMPLETION_ONSTACK(done);
-	cpumask_var_t offline_mask;
-	int cpuret;

	if (!rtas_service_present("ibm,suspend-me"))
		return -ENOSYS;

@@ -961,9 +868,6 @@ int rtas_ibm_suspend_me(u64 handle)
		return -EIO;
	}

-	if (!alloc_cpumask_var(&offline_mask, GFP_KERNEL))
-		return -ENOMEM;
-
	atomic_set(&data.working, 0);
	atomic_set(&data.done, 0);
	atomic_set(&data.error, 0);

@@ -972,24 +876,8 @@ int rtas_ibm_suspend_me(u64 handle)

	lock_device_hotplug();

-	/* All present CPUs must be online */
-	cpumask_andnot(offline_mask, cpu_present_mask, cpu_online_mask);
-	cpuret = rtas_online_cpus_mask(offline_mask);
-	if (cpuret) {
-		pr_err("%s: Could not bring present CPUs online.\n", __func__);
-		atomic_set(&data.error, cpuret);
-		goto out;
-	}
-
	cpu_hotplug_disable();

-	/* Check if we raced with a CPU-Offline Operation */
-	if (!cpumask_equal(cpu_present_mask, cpu_online_mask)) {
-		pr_info("%s: Raced against a concurrent CPU-Offline\n", __func__);
-		atomic_set(&data.error, -EAGAIN);
-		goto out_hotplug_enable;
-	}
-
	/* Call function on all CPUs.  One of us will make the
	 * rtas call
	 */

@@ -1001,18 +889,11 @@ int rtas_ibm_suspend_me(u64 handle)
	if (atomic_read(&data.error) != 0)
		printk(KERN_ERR "Error doing global join\n");

-out_hotplug_enable:
-	cpu_hotplug_enable();
+	cpu_hotplug_enable();

-	/* Take down CPUs not online prior to suspend */
-	cpuret = rtas_offline_cpus_mask(offline_mask);
-	if (cpuret)
-		pr_warn("%s: Could not restore CPUs to offline state.\n",
-			__func__);
-
-out:
	unlock_device_hotplug();
-	free_cpumask_var(offline_mask);
+
	return atomic_read(&data.error);
 }
 #else /* CONFIG_PPC_PSERIES */
@@ -704,7 +704,7 @@ int vdso_getcpu_init(void)
	node = cpu_to_node(cpu);
	WARN_ON_ONCE(node > 0xffff);

-	val = (cpu & 0xfff) | ((node & 0xffff) << 16);
+	val = (cpu & 0xffff) | ((node & 0xffff) << 16);
	mtspr(SPRN_SPRG_VDSO_WRITE, val);
	get_paca()->sprg_vdso = val;

@@ -83,13 +83,17 @@ static int pkey_initialize(void)
	scan_pkey_feature();

	/*
-	 * Let's assume 32 pkeys on P8 bare metal, if its not defined by device
-	 * tree. We make this exception since skiboot forgot to expose this
-	 * property on power8.
+	 * Let's assume 32 pkeys on P8/P9 bare metal, if its not defined by device
+	 * tree. We make this exception since some version of skiboot forgot to
+	 * expose this property on power8/9.
	 */
-	if (!pkeys_devtree_defined && !firmware_has_feature(FW_FEATURE_LPAR) &&
-			cpu_has_feature(CPU_FTRS_POWER8))
-		pkeys_total = 32;
+	if (!pkeys_devtree_defined && !firmware_has_feature(FW_FEATURE_LPAR)) {
+		unsigned long pvr = mfspr(SPRN_PVR);
+
+		if (PVR_VER(pvr) == PVR_POWER8 || PVR_VER(pvr) == PVR_POWER8E ||
+		    PVR_VER(pvr) == PVR_POWER8NVL || PVR_VER(pvr) == PVR_POWER9)
+			pkeys_total = 32;
+	}

	/*
	 * Adjust the upper limit, based on the number of bits supported by
@@ -132,15 +132,11 @@ static ssize_t store_hibernate(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
 {
-	cpumask_var_t offline_mask;
	int rc;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

-	if (!alloc_cpumask_var(&offline_mask, GFP_KERNEL))
-		return -ENOMEM;
-
	stream_id = simple_strtoul(buf, NULL, 16);

	do {

@@ -150,32 +146,16 @@ static ssize_t store_hibernate(struct device *dev,
	} while (rc == -EAGAIN);

	if (!rc) {
-		/* All present CPUs must be online */
-		cpumask_andnot(offline_mask, cpu_present_mask,
-				cpu_online_mask);
-		rc = rtas_online_cpus_mask(offline_mask);
-		if (rc) {
-			pr_err("%s: Could not bring present CPUs online.\n",
-					__func__);
-			goto out;
-		}
-
		stop_topology_update();
		rc = pm_suspend(PM_SUSPEND_MEM);
		start_topology_update();
-
-		/* Take down CPUs not online prior to suspend */
-		if (!rtas_offline_cpus_mask(offline_mask))
-			pr_warn("%s: Could not restore CPUs to offline "
-					"state.\n", __func__);
	}

	stream_id = 0;

	if (!rc)
		rc = count;
-out:
-	free_cpumask_var(offline_mask);
-
	return rc;
 }

@@ -2485,23 +2485,36 @@ void gmap_sync_dirty_log_pmd(struct gmap *gmap, unsigned long bitmap[4],
 }
 EXPORT_SYMBOL_GPL(gmap_sync_dirty_log_pmd);

+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+static int thp_split_walk_pmd_entry(pmd_t *pmd, unsigned long addr,
+				    unsigned long end, struct mm_walk *walk)
+{
+	struct vm_area_struct *vma = walk->vma;
+
+	split_huge_pmd(vma, pmd, addr);
+	return 0;
+}
+
+static const struct mm_walk_ops thp_split_walk_ops = {
+	.pmd_entry	= thp_split_walk_pmd_entry,
+};
+
 static inline void thp_split_mm(struct mm_struct *mm)
 {
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	struct vm_area_struct *vma;
-	unsigned long addr;

	for (vma = mm->mmap; vma != NULL; vma = vma->vm_next) {
-		for (addr = vma->vm_start;
-		     addr < vma->vm_end;
-		     addr += PAGE_SIZE)
-			follow_page(vma, addr, FOLL_SPLIT);
		vma->vm_flags &= ~VM_HUGEPAGE;
		vma->vm_flags |= VM_NOHUGEPAGE;
+		walk_page_vma(vma, &thp_split_walk_ops, NULL);
	}
	mm->def_flags |= VM_NOHUGEPAGE;
-#endif
 }
+#else
+static inline void thp_split_mm(struct mm_struct *mm)
+{
+}
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

 /*
  * Remove all empty zero pages from the mapping for lazy refaulting
@@ -127,10 +127,6 @@ ddq_add_8:

 /* generate a unique variable for ddq_add_x */

-.macro setddq n
-	var_ddq_add = ddq_add_\n
-.endm
-
 /* generate a unique variable for xmm register */
 .macro setxdata n
	var_xdata = %xmm\n

@@ -140,9 +136,7 @@ ddq_add_8:

 .macro club name, id
 .altmacro
-.if \name == DDQ_DATA
-	setddq %\id
-.elseif \name == XDATA
+.if \name == XDATA
	setxdata %\id
 .endif
 .noaltmacro

@@ -165,9 +159,8 @@ ddq_add_8:

	.set i, 1
	.rept (by - 1)
-		club DDQ_DATA, i
		club XDATA, i
-		vpaddq	var_ddq_add(%rip), xcounter, var_xdata
+		vpaddq	(ddq_add_1 + 16 * (i - 1))(%rip), xcounter, var_xdata
		vptest	ddq_low_msk(%rip), var_xdata
		jnz 1f
		vpaddq	ddq_high_add_1(%rip), var_xdata, var_xdata

@@ -180,8 +173,7 @@ ddq_add_8:
	vmovdqa	1*16(p_keys), xkeyA

	vpxor	xkey0, xdata0, xdata0
-	club DDQ_DATA, by
-	vpaddq	var_ddq_add(%rip), xcounter, xcounter
+	vpaddq	(ddq_add_1 + 16 * (by - 1))(%rip), xcounter, xcounter
	vptest	ddq_low_msk(%rip), xcounter
	jnz	1f
	vpaddq	ddq_high_add_1(%rip), xcounter, xcounter
@@ -266,7 +266,7 @@ ALL_F:      .octa 0xffffffffffffffffffffffffffffffff
	PSHUFB_XMM %xmm2, %xmm0
	movdqu %xmm0, CurCount(%arg2) # ctx_data.current_counter = iv

-	PRECOMPUTE \SUBKEY, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7,
+	PRECOMPUTE \SUBKEY, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7
	movdqu HashKey(%arg2), %xmm13

	CALC_AAD_HASH %xmm13, \AAD, \AADLEN, %xmm0, %xmm1, %xmm2, %xmm3, \

@@ -978,7 +978,7 @@ _initial_blocks_done\@:
 * arg1, %arg3, %arg4 are used as pointers only, not modified
 * %r11 is the data offset value
 */
-.macro GHASH_4_ENCRYPT_4_PARALLEL_ENC TMP1 TMP2 TMP3 TMP4 TMP5 \
+.macro GHASH_4_ENCRYPT_4_PARALLEL_enc TMP1 TMP2 TMP3 TMP4 TMP5 \
 TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation

	movdqa	  \XMM1, \XMM5

@@ -1186,7 +1186,7 @@ aes_loop_par_enc_done\@:
 * arg1, %arg3, %arg4 are used as pointers only, not modified
 * %r11 is the data offset value
 */
-.macro GHASH_4_ENCRYPT_4_PARALLEL_DEC TMP1 TMP2 TMP3 TMP4 TMP5 \
+.macro GHASH_4_ENCRYPT_4_PARALLEL_dec TMP1 TMP2 TMP3 TMP4 TMP5 \
 TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation

	movdqa	  \XMM1, \XMM5
@@ -511,7 +511,7 @@ static void do_inject(void)
	 */
	if (inj_type == DFR_INT_INJ) {
		i_mce.status |= MCI_STATUS_DEFERRED;
-		i_mce.status |= (i_mce.status & ~MCI_STATUS_UC);
+		i_mce.status &= ~MCI_STATUS_UC;
	}

	/*
@@ -326,7 +326,7 @@ static unsigned long x86_fsgsbase_read_task(struct task_struct *task,
	 */
	mutex_lock(&task->mm->context.lock);
	ldt = task->mm->context.ldt;
-	if (unlikely(idx >= ldt->nr_entries))
+	if (unlikely(!ldt || idx >= ldt->nr_entries))
		base = 0;
	else
		base = get_desc_base(ldt->entries + idx);
@@ -256,7 +256,7 @@ static void bfqg_put(struct bfq_group *bfqg)
		kfree(bfqg);
 }

-void bfqg_and_blkg_get(struct bfq_group *bfqg)
+static void bfqg_and_blkg_get(struct bfq_group *bfqg)
 {
	/* see comments in bfq_bic_update_cgroup for why refcounting bfqg */
	bfqg_get(bfqg);
@@ -949,7 +949,6 @@ struct bfq_group *bfq_find_set_group(struct bfq_data *bfqd,
 struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg);
 struct bfq_group *bfqq_group(struct bfq_queue *bfqq);
 struct bfq_group *bfq_create_group_hierarchy(struct bfq_data *bfqd, int node);
-void bfqg_and_blkg_get(struct bfq_group *bfqg);
 void bfqg_and_blkg_put(struct bfq_group *bfqg);

 #ifdef CONFIG_BFQ_GROUP_IOSCHED
@@ -536,9 +536,7 @@ static void bfq_get_entity(struct bfq_entity *entity)
		bfqq->ref++;
		bfq_log_bfqq(bfqq->bfqd, bfqq, "get_entity: %p %d",
			     bfqq, bfqq->ref);
-	} else
-		bfqg_and_blkg_get(container_of(entity, struct bfq_group,
-					       entity));
+	}
 }

 /**

@@ -652,14 +650,8 @@ static void bfq_forget_entity(struct bfq_service_tree *st,

	entity->on_st = false;
	st->wsum -= entity->weight;
-	if (is_in_service)
-		return;
-
-	if (bfqq)
+	if (bfqq && !is_in_service)
		bfq_put_queue(bfqq);
-	else
-		bfqg_and_blkg_put(container_of(entity, struct bfq_group,
-					       entity));
 }

 /**
@@ -473,10 +473,6 @@ acpi_status acpi_ex_prep_field_value(struct acpi_create_field_info *info)
			    (u8)access_byte_width;
			}
		}
-		/* An additional reference for the container */
-
-		acpi_ut_add_reference(obj_desc->field.region_obj);
-
		ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
				  "RegionField: BitOff %X, Off %X, Gran %X, Region %p\n",
				  obj_desc->field.start_field_bit_offset,
@@ -568,11 +568,6 @@ acpi_ut_update_object_reference(union acpi_operand_object *object, u16 action)
			next_object = object->buffer_field.buffer_obj;
			break;

-		case ACPI_TYPE_LOCAL_REGION_FIELD:
-
-			next_object = object->field.region_obj;
-			break;
-
		case ACPI_TYPE_LOCAL_BANK_FIELD:

			next_object = object->bank_field.bank_obj;

@@ -613,6 +608,7 @@ acpi_ut_update_object_reference(union acpi_operand_object *object, u16 action)
			}
			break;

+		case ACPI_TYPE_LOCAL_REGION_FIELD:
		case ACPI_TYPE_REGION:
		default:

@@ -889,6 +889,7 @@ static void loop_config_discard(struct loop_device *lo)
	struct file *file = lo->lo_backing_file;
	struct inode *inode = file->f_mapping->host;
	struct request_queue *q = lo->lo_queue;
+	u32 granularity, max_discard_sectors;

	/*
	 * If the backing device is a block device, mirror its zeroing

@@ -901,11 +902,10 @@ static void loop_config_discard(struct loop_device *lo)
		struct request_queue *backingq;

		backingq = bdev_get_queue(inode->i_bdev);
-		blk_queue_max_discard_sectors(q,
-			backingq->limits.max_write_zeroes_sectors);
-
-		blk_queue_max_write_zeroes_sectors(q,
-			backingq->limits.max_write_zeroes_sectors);
+		max_discard_sectors = backingq->limits.max_write_zeroes_sectors;
+		granularity = backingq->limits.discard_granularity ?:
+			queue_physical_block_size(backingq);

	/*
	 * We use punch hole to reclaim the free space used by the

@@ -914,23 +914,26 @@ static void loop_config_discard(struct loop_device *lo)
	 * useful information.
	 */
	} else if (!file->f_op->fallocate || lo->lo_encrypt_key_size) {
-		q->limits.discard_granularity = 0;
-		q->limits.discard_alignment = 0;
-		blk_queue_max_discard_sectors(q, 0);
-		blk_queue_max_write_zeroes_sectors(q, 0);
+		max_discard_sectors = 0;
+		granularity = 0;

	} else {
-		q->limits.discard_granularity = inode->i_sb->s_blocksize;
-		q->limits.discard_alignment = 0;
-
-		blk_queue_max_discard_sectors(q, UINT_MAX >> 9);
-		blk_queue_max_write_zeroes_sectors(q, UINT_MAX >> 9);
+		max_discard_sectors = UINT_MAX >> 9;
+		granularity = inode->i_sb->s_blocksize;
	}

-	if (q->limits.max_write_zeroes_sectors)
+	if (max_discard_sectors) {
+		q->limits.discard_granularity = granularity;
+		blk_queue_max_discard_sectors(q, max_discard_sectors);
+		blk_queue_max_write_zeroes_sectors(q, max_discard_sectors);
		blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
-	else
+	} else {
+		q->limits.discard_granularity = 0;
+		blk_queue_max_discard_sectors(q, 0);
+		blk_queue_max_write_zeroes_sectors(q, 0);
		blk_queue_flag_clear(QUEUE_FLAG_DISCARD, q);
+	}
+	q->limits.discard_alignment = 0;
 }

@@ -2351,6 +2354,8 @@ static void __exit loop_exit(void)

	range = max_loop ? max_loop << part_shift :  1UL << MINORBITS;

+	mutex_lock(&loop_ctl_mutex);
+
	idr_for_each(&loop_index_idr, &loop_exit_cb, NULL);
	idr_destroy(&loop_index_idr);

@@ -2358,6 +2363,8 @@ static void __exit loop_exit(void)
	unregister_blkdev(LOOP_MAJOR, "loop");

	misc_deregister(&loop_misc);
+
+	mutex_unlock(&loop_ctl_mutex);
 }

 module_init(loop_init);
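The loop_config_discard() rework above moves to a compute-then-apply shape: every branch now only derives a (granularity, max sectors) pair, and the queue limits are written in exactly one place, so no branch can leave the queue half-configured. A reduced stand-alone model of that shape — the struct and names are invented, not the block-layer API:

#include <stdio.h>

struct limits { unsigned int granularity, max_sectors; int enabled; };

static void config_discard(struct limits *q, int backing_supports_discard,
			   unsigned int block_size)
{
	unsigned int granularity, max_sectors;

	/* Branches only compute values... */
	if (backing_supports_discard) {
		max_sectors = (~0u) >> 9;
		granularity = block_size;
	} else {
		max_sectors = 0;
		granularity = 0;
	}

	/* ...and a single application point keeps the triple consistent. */
	if (max_sectors) {
		q->granularity = granularity;
		q->max_sectors = max_sectors;
		q->enabled = 1;
	} else {
		q->granularity = 0;
		q->max_sectors = 0;
		q->enabled = 0;
	}
}

int main(void)
{
	struct limits q = { 0 };

	config_discard(&q, 1, 4096);
	printf("gran=%u max=%u on=%d\n", q.granularity, q.max_sectors, q.enabled);
	return 0;
}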
@@ -328,7 +328,7 @@ static const struct btmrvl_sdio_device btmrvl_sdio_sd8897 = {

 static const struct btmrvl_sdio_device btmrvl_sdio_sd8977 = {
	.helper		= NULL,
-	.firmware	= "mrvl/sd8977_uapsta.bin",
+	.firmware	= "mrvl/sdsd8977_combo_v2.bin",
	.reg		= &btmrvl_reg_8977,
	.support_pscan_win_report = true,
	.sd_blksz_fw_dl	= 256,

@@ -346,7 +346,7 @@ static const struct btmrvl_sdio_device btmrvl_sdio_sd8987 = {

 static const struct btmrvl_sdio_device btmrvl_sdio_sd8997 = {
	.helper		= NULL,
-	.firmware	= "mrvl/sd8997_uapsta.bin",
+	.firmware	= "mrvl/sdsd8997_combo_v4.bin",
	.reg		= &btmrvl_reg_8997,
	.support_pscan_win_report = true,
	.sd_blksz_fw_dl	= 256,

@@ -1831,6 +1831,6 @@ MODULE_FIRMWARE("mrvl/sd8787_uapsta.bin");
 MODULE_FIRMWARE("mrvl/sd8797_uapsta.bin");
 MODULE_FIRMWARE("mrvl/sd8887_uapsta.bin");
 MODULE_FIRMWARE("mrvl/sd8897_uapsta.bin");
-MODULE_FIRMWARE("mrvl/sd8977_uapsta.bin");
+MODULE_FIRMWARE("mrvl/sdsd8977_combo_v2.bin");
 MODULE_FIRMWARE("mrvl/sd8987_uapsta.bin");
-MODULE_FIRMWARE("mrvl/sd8997_uapsta.bin");
+MODULE_FIRMWARE("mrvl/sdsd8997_combo_v4.bin");
@@ -684,7 +684,7 @@ static int mtk_setup_firmware(struct hci_dev *hdev, const char *fwname)
	const u8 *fw_ptr;
	size_t fw_size;
	int err, dlen;
-	u8 flag;
+	u8 flag, param;

	err = request_firmware(&fw, fwname, &hdev->dev);
	if (err < 0) {

@@ -692,6 +692,20 @@ static int mtk_setup_firmware(struct hci_dev *hdev, const char *fwname)
		return err;
	}

+	/* Power on data RAM the firmware relies on. */
+	param = 1;
+	wmt_params.op = MTK_WMT_FUNC_CTRL;
+	wmt_params.flag = 3;
+	wmt_params.dlen = sizeof(param);
+	wmt_params.data = &param;
+	wmt_params.status = NULL;
+
+	err = mtk_hci_wmt_sync(hdev, &wmt_params);
+	if (err < 0) {
+		bt_dev_err(hdev, "Failed to power on data RAM (%d)", err);
+		return err;
+	}
+
	fw_ptr = fw->data;
	fw_size = fw->size;

@@ -790,7 +790,7 @@ static int h5_serdev_probe(struct serdev_device *serdev)
	if (!h5)
		return -ENOMEM;

-	set_bit(HCI_UART_RESET_ON_INIT, &h5->serdev_hu.flags);
+	set_bit(HCI_UART_RESET_ON_INIT, &h5->serdev_hu.hdev_flags);

	h5->hu = &h5->serdev_hu;
	h5->serdev_hu.serdev = serdev;
@@ -357,7 +357,8 @@ void hci_uart_unregister_device(struct hci_uart *hu)
	struct hci_dev *hdev = hu->hdev;

	clear_bit(HCI_UART_PROTO_READY, &hu->flags);
-	hci_unregister_dev(hdev);
+	if (test_bit(HCI_UART_REGISTERED, &hu->flags))
+		hci_unregister_dev(hdev);
	hci_free_dev(hdev);

	cancel_work_sync(&hu->write_work);
@@ -1109,6 +1109,10 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = {
		   SYSC_QUIRK_LEGACY_IDLE),
	SYSC_QUIRK("smartreflex", 0, -1, 0x24, -1, 0x00000000, 0xffffffff,
		   SYSC_QUIRK_LEGACY_IDLE),
+	SYSC_QUIRK("usb_host_hs", 0, 0, 0x10, 0x14, 0x50700100, 0xffffffff,
+		   SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY),
+	SYSC_QUIRK("usb_host_hs", 0, 0, 0x10, -1, 0x50700101, 0xffffffff,
+		   SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY),
	SYSC_QUIRK("smartreflex", 0, -1, 0x38, -1, 0x00000000, 0xffffffff,
		   SYSC_QUIRK_LEGACY_IDLE),
	SYSC_QUIRK("timer", 0, 0, 0x10, 0x14, 0x00000015, 0xffffffff,

@@ -1181,8 +1185,6 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = {
	SYSC_QUIRK("timer32k", 0, 0, 0x4, -1, 0x00000060, 0xffffffff, 0),
	SYSC_QUIRK("usbhstll", 0, 0, 0x10, 0x14, 0x00000004, 0xffffffff, 0),
	SYSC_QUIRK("usbhstll", 0, 0, 0x10, 0x14, 0x00000008, 0xffffffff, 0),
-	SYSC_QUIRK("usb_host_hs", 0, 0, 0x10, 0x14, 0x50700100, 0xffffffff, 0),
-	SYSC_QUIRK("usb_host_hs", 0, 0, 0x10, -1, 0x50700101, 0xffffffff, 0),
	SYSC_QUIRK("usb_otg_hs", 0, 0x400, 0x404, 0x408, 0x00000050,
		   0xffffffff, 0),
	SYSC_QUIRK("wdt", 0, 0, 0x10, 0x14, 0x502a0500, 0xfffff0f0, 0),
@@ -304,8 +304,10 @@ static int intel_gtt_setup_scratch_page(void)
	if (intel_private.needs_dmar) {
		dma_addr = pci_map_page(intel_private.pcidev, page, 0,
				    PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
-		if (pci_dma_mapping_error(intel_private.pcidev, dma_addr))
+		if (pci_dma_mapping_error(intel_private.pcidev, dma_addr)) {
+			__free_page(page);
			return -EINVAL;
+		}

		intel_private.scratch_page_dma = dma_addr;
	} else
@@ -389,13 +389,8 @@ struct tpm_chip *tpm_chip_alloc(struct device *pdev,
	chip->cdev.owner = THIS_MODULE;
	chip->cdevs.owner = THIS_MODULE;

-	chip->work_space.context_buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
-	if (!chip->work_space.context_buf) {
-		rc = -ENOMEM;
-		goto out;
-	}
-	chip->work_space.session_buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
-	if (!chip->work_space.session_buf) {
+	rc = tpm2_init_space(&chip->work_space, TPM2_SPACE_BUFFER_SIZE);
+	if (rc) {
		rc = -ENOMEM;
		goto out;
	}
@@ -177,6 +177,9 @@ struct tpm_header {

 #define TPM_TAG_RQU_COMMAND 193

+/* TPM2 specific constants. */
+#define TPM2_SPACE_BUFFER_SIZE		16384 /* 16 kB */
+
 struct	stclear_flags_t {
	__be16	tag;
	u8	deactivated;

@@ -456,7 +459,7 @@ void tpm2_shutdown(struct tpm_chip *chip, u16 shutdown_type);
 unsigned long tpm2_calc_ordinal_duration(struct tpm_chip *chip, u32 ordinal);
 int tpm2_probe(struct tpm_chip *chip);
 int tpm2_find_cc(struct tpm_chip *chip, u32 cc);
-int tpm2_init_space(struct tpm_space *space);
+int tpm2_init_space(struct tpm_space *space, unsigned int buf_size);
 void tpm2_del_space(struct tpm_chip *chip, struct tpm_space *space);
 void tpm2_flush_space(struct tpm_chip *chip);
 int tpm2_prepare_space(struct tpm_chip *chip, struct tpm_space *space, u8 *cmd,
@@ -38,18 +38,21 @@ static void tpm2_flush_sessions(struct tpm_chip *chip, struct tpm_space *space)
	}
 }

-int tpm2_init_space(struct tpm_space *space)
+int tpm2_init_space(struct tpm_space *space, unsigned int buf_size)
 {
-	space->context_buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
+	space->context_buf = kzalloc(buf_size, GFP_KERNEL);
	if (!space->context_buf)
		return -ENOMEM;

-	space->session_buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
+	space->session_buf = kzalloc(buf_size, GFP_KERNEL);
	if (space->session_buf == NULL) {
		kfree(space->context_buf);
+		/* Prevent caller getting a dangling pointer. */
+		space->context_buf = NULL;
		return -ENOMEM;
	}

+	space->buf_size = buf_size;
	return 0;
 }

@@ -311,8 +314,10 @@ int tpm2_prepare_space(struct tpm_chip *chip, struct tpm_space *space, u8 *cmd,
	       sizeof(space->context_tbl));
	memcpy(&chip->work_space.session_tbl, &space->session_tbl,
	       sizeof(space->session_tbl));
-	memcpy(chip->work_space.context_buf, space->context_buf, PAGE_SIZE);
-	memcpy(chip->work_space.session_buf, space->session_buf, PAGE_SIZE);
+	memcpy(chip->work_space.context_buf, space->context_buf,
+	       space->buf_size);
+	memcpy(chip->work_space.session_buf, space->session_buf,
+	       space->buf_size);

	rc = tpm2_load_space(chip);
	if (rc) {

@@ -492,7 +497,7 @@ static int tpm2_save_space(struct tpm_chip *chip)
			continue;

		rc = tpm2_save_context(chip, space->context_tbl[i],
-				       space->context_buf, PAGE_SIZE,
+				       space->context_buf, space->buf_size,
				       &offset);
		if (rc == -ENOENT) {
			space->context_tbl[i] = 0;

@@ -509,9 +514,8 @@ static int tpm2_save_space(struct tpm_chip *chip)
			continue;

		rc = tpm2_save_context(chip, space->session_tbl[i],
-				       space->session_buf, PAGE_SIZE,
+				       space->session_buf, space->buf_size,
				       &offset);
-
		if (rc == -ENOENT) {
			/* handle error saving session, just forget it */
			space->session_tbl[i] = 0;

@@ -557,8 +561,10 @@ int tpm2_commit_space(struct tpm_chip *chip, struct tpm_space *space,
	       sizeof(space->context_tbl));
	memcpy(&space->session_tbl, &chip->work_space.session_tbl,
	       sizeof(space->session_tbl));
-	memcpy(space->context_buf, chip->work_space.context_buf, PAGE_SIZE);
-	memcpy(space->session_buf, chip->work_space.session_buf, PAGE_SIZE);
+	memcpy(space->context_buf, chip->work_space.context_buf,
+	       space->buf_size);
+	memcpy(space->session_buf, chip->work_space.session_buf,
+	       space->buf_size);

	return 0;
 out:
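Beyond the size parameterization, the tpm2_init_space() hunk above also nulls the first buffer when the second allocation fails, so a caller that keeps the struct around cannot later free or use a dangling pointer. A reduced stand-alone form of that paired-allocation pattern — the struct and names are invented stand-ins:

#include <stdlib.h>

struct space { unsigned char *ctx, *sess; size_t buf_size; };

static int space_init(struct space *s, size_t buf_size)
{
	s->ctx = calloc(1, buf_size);
	if (!s->ctx)
		return -1;

	s->sess = calloc(1, buf_size);
	if (!s->sess) {
		free(s->ctx);
		s->ctx = NULL;	/* prevent the caller seeing a dangling pointer */
		return -1;
	}

	s->buf_size = buf_size;
	return 0;
}

int main(void)
{
	struct space s;

	return space_init(&s, 16384) ? 1 : 0;
}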
@@ -21,7 +21,7 @@ static int tpmrm_open(struct inode *inode, struct file *file)
	if (priv == NULL)
		return -ENOMEM;

-	rc = tpm2_init_space(&priv->space);
+	rc = tpm2_init_space(&priv->space, TPM2_SPACE_BUFFER_SIZE);
	if (rc) {
		kfree(priv);
		return -ENOMEM;
@@ -103,6 +103,8 @@ static const struct clk_ops scmi_clk_ops = {
 static int scmi_clk_ops_init(struct device *dev, struct scmi_clk *sclk)
 {
	int ret;
+	unsigned long min_rate, max_rate;
+
	struct clk_init_data init = {
		.flags = CLK_GET_RATE_NOCACHE,
		.num_parents = 0,

@@ -112,9 +114,23 @@ static int scmi_clk_ops_init(struct device *dev, struct scmi_clk *sclk)

	sclk->hw.init = &init;
	ret = devm_clk_hw_register(dev, &sclk->hw);
-	if (!ret)
-		clk_hw_set_rate_range(&sclk->hw, sclk->info->range.min_rate,
-				      sclk->info->range.max_rate);
+	if (ret)
+		return ret;
+
+	if (sclk->info->rate_discrete) {
+		int num_rates = sclk->info->list.num_rates;
+
+		if (num_rates <= 0)
+			return -EINVAL;
+
+		min_rate = sclk->info->list.rates[0];
+		max_rate = sclk->info->list.rates[num_rates - 1];
+	} else {
+		min_rate = sclk->info->range.min_rate;
+		max_rate = sclk->info->range.max_rate;
+	}
+
+	clk_hw_set_rate_range(&sclk->hw, min_rate, max_rate);
	return ret;
 }
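The reworked scmi_clk_ops_init() above derives the clamp range from whichever description the firmware provided: the end points of a sorted discrete rate list, or an explicit min/max range. A reduced sketch of that selection logic — the types and names here are invented, not the SCMI API:

#include <stdio.h>

struct clk_info {
	int rate_discrete;		/* 1: list of rates, 0: [min, max] range */
	const unsigned long *rates;	/* sorted ascending when discrete */
	int num_rates;
	unsigned long min_rate, max_rate;
};

static int pick_rate_range(const struct clk_info *info,
			   unsigned long *min, unsigned long *max)
{
	if (info->rate_discrete) {
		if (info->num_rates <= 0)
			return -1;	/* nothing usable reported */
		*min = info->rates[0];
		*max = info->rates[info->num_rates - 1];
	} else {
		*min = info->min_rate;
		*max = info->max_rate;
	}
	return 0;
}

int main(void)
{
	const unsigned long rates[] = { 100000000, 200000000, 400000000 };
	struct clk_info info = { .rate_discrete = 1, .rates = rates, .num_rates = 3 };
	unsigned long lo, hi;

	if (!pick_rate_range(&info, &lo, &hi))
		printf("clamp to [%lu, %lu]\n", lo, hi);
	return 0;
}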
@@ -147,12 +147,22 @@ static inline bool has_state_changed(struct clk_rpmh *c, u32 state)
	       != (c->aggr_state & BIT(state));
 }

+static int clk_rpmh_send(struct clk_rpmh *c, enum rpmh_state state,
+			 struct tcs_cmd *cmd, bool wait)
+{
+	if (wait)
+		return rpmh_write(c->dev, state, cmd, 1);
+
+	return rpmh_write_async(c->dev, state, cmd, 1);
+}
+
 static int clk_rpmh_send_aggregate_command(struct clk_rpmh *c)
 {
	struct tcs_cmd cmd = { 0 };
	u32 cmd_state, on_val;
	enum rpmh_state state = RPMH_SLEEP_STATE;
	int ret;
+	bool wait;

	cmd.addr = c->res_addr;
	cmd_state = c->aggr_state;

@@ -163,7 +173,8 @@ static int clk_rpmh_send_aggregate_command(struct clk_rpmh *c)
		if (cmd_state & BIT(state))
			cmd.data = on_val;

-		ret = rpmh_write_async(c->dev, state, &cmd, 1);
+		wait = cmd_state && state == RPMH_ACTIVE_ONLY_STATE;
+		ret = clk_rpmh_send(c, state, &cmd, wait);
		if (ret) {
			dev_err(c->dev, "set %s state of %s failed: (%d)\n",
				!state ? "sleep" :

@@ -271,7 +282,7 @@ static int clk_rpmh_bcm_send_cmd(struct clk_rpmh *c, bool enable)
	cmd.addr = c->res_addr;
	cmd.data = BCM_TCS_CMD(enable, cmd_state);

-	ret = rpmh_write_async(c->dev, RPMH_ACTIVE_ONLY_STATE, &cmd, 1);
+	ret = clk_rpmh_send(c, RPMH_ACTIVE_ONLY_STATE, &cmd, enable);
	if (ret) {
		dev_err(c->dev, "set active state of %s failed: (%d)\n",
			c->res_name, ret);
@@ -29,6 +29,7 @@ config ARM_ARMADA_37XX_CPUFREQ
 config ARM_ARMADA_8K_CPUFREQ
	tristate "Armada 8K CPUFreq driver"
	depends on ARCH_MVEBU && CPUFREQ_DT
+	select ARMADA_AP_CPU_CLK
	help
	  This enables the CPUFreq driver support for Marvell
	  Armada8k SOCs.
@@ -458,6 +458,7 @@ static int __init armada37xx_cpufreq_driver_init(void)
	/* Now that everything is setup, enable the DVFS at hardware level */
	armada37xx_cpufreq_enable_dvfs(nb_pm_base);

+	memset(&pdata, 0, sizeof(pdata));
	pdata.suspend = armada37xx_cpufreq_suspend;
	pdata.resume = armada37xx_cpufreq_resume;

@@ -201,6 +201,7 @@ static inline int cvm_enc_dec(struct ablkcipher_request *req, u32 enc)
	int status;

	memset(req_info, 0, sizeof(struct cpt_request_info));
+	req_info->may_sleep = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) != 0;
	memset(fctx, 0, sizeof(struct fc_context));
	create_input_list(req, enc, enc_iv_len);
	create_output_list(req, enc_iv_len);
@@ -133,7 +133,7 @@ static inline int setup_sgio_list(struct cpt_vf *cptvf,

	/* Setup gather (input) components */
	g_sz_bytes = ((req->incnt + 3) / 4) * sizeof(struct sglist_component);
-	info->gather_components = kzalloc(g_sz_bytes, GFP_KERNEL);
+	info->gather_components = kzalloc(g_sz_bytes, req->may_sleep ? GFP_KERNEL : GFP_ATOMIC);
	if (!info->gather_components) {
		ret = -ENOMEM;
		goto scatter_gather_clean;

@@ -150,7 +150,7 @@ static inline int setup_sgio_list(struct cpt_vf *cptvf,

	/* Setup scatter (output) components */
	s_sz_bytes = ((req->outcnt + 3) / 4) * sizeof(struct sglist_component);
-	info->scatter_components = kzalloc(s_sz_bytes, GFP_KERNEL);
+	info->scatter_components = kzalloc(s_sz_bytes, req->may_sleep ? GFP_KERNEL : GFP_ATOMIC);
	if (!info->scatter_components) {
		ret = -ENOMEM;
		goto scatter_gather_clean;

@@ -167,7 +167,7 @@ static inline int setup_sgio_list(struct cpt_vf *cptvf,

	/* Create and initialize DPTR */
	info->dlen = g_sz_bytes + s_sz_bytes + SG_LIST_HDR_SIZE;
-	info->in_buffer = kzalloc(info->dlen, GFP_KERNEL);
+	info->in_buffer = kzalloc(info->dlen, req->may_sleep ? GFP_KERNEL : GFP_ATOMIC);
	if (!info->in_buffer) {
		ret = -ENOMEM;
		goto scatter_gather_clean;

@@ -195,7 +195,7 @@ static inline int setup_sgio_list(struct cpt_vf *cptvf,
	}

	/* Create and initialize RPTR */
-	info->out_buffer = kzalloc(COMPLETION_CODE_SIZE, GFP_KERNEL);
+	info->out_buffer = kzalloc(COMPLETION_CODE_SIZE, req->may_sleep ? GFP_KERNEL : GFP_ATOMIC);
	if (!info->out_buffer) {
		ret = -ENOMEM;
		goto scatter_gather_clean;

@@ -421,7 +421,7 @@ int process_request(struct cpt_vf *cptvf, struct cpt_request_info *req)
	struct cpt_vq_command vq_cmd;
	union cpt_inst_s cptinst;

-	info = kzalloc(sizeof(*info), GFP_KERNEL);
+	info = kzalloc(sizeof(*info), req->may_sleep ? GFP_KERNEL : GFP_ATOMIC);
	if (unlikely(!info)) {
		dev_err(&pdev->dev, "Unable to allocate memory for info_buffer\n");
		return -ENOMEM;

@@ -443,7 +443,7 @@ int process_request(struct cpt_vf *cptvf, struct cpt_request_info *req)
	 * Get buffer for union cpt_res_s response
	 * structure and its physical address
	 */
-	info->completion_addr = kzalloc(sizeof(union cpt_res_s), GFP_KERNEL);
+	info->completion_addr = kzalloc(sizeof(union cpt_res_s), req->may_sleep ? GFP_KERNEL : GFP_ATOMIC);
	if (unlikely(!info->completion_addr)) {
		dev_err(&pdev->dev, "Unable to allocate memory for completion_addr\n");
		ret = -ENOMEM;
@@ -62,6 +62,8 @@ struct cpt_request_info {
	union ctrl_info ctrl; /* User control information */
	struct cptvf_request req; /* Request Information (Core specific) */

+	bool may_sleep;
+
	struct buf_ptr in[MAX_BUF_CNT];
	struct buf_ptr out[MAX_BUF_CNT];

@@ -468,6 +468,7 @@ struct ccp_sg_workarea {
	unsigned int sg_used;

	struct scatterlist *dma_sg;
+	struct scatterlist *dma_sg_head;
	struct device *dma_dev;
	unsigned int dma_count;
	enum dma_data_direction dma_dir;
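The ccp change that follows keeps two cursors over the same buffer: the DMA-mapped list, whose entries may have been coalesced by the IOMMU, and the original scatterlist, which must be advanced one or more entries until it catches up with the DMA view. A stand-alone sketch of that catch-up loop over plain arrays — no real scatterlist API is used:

#include <stdio.h>

int main(void)
{
	/* Original entries and the merged DMA view of the same buffer:
	 * two 4K chunks were coalesced into one 8K DMA entry. */
	unsigned int orig[] = { 4096, 4096, 4096 };
	unsigned int dma[]  = { 8192, 4096 };
	unsigned int oi = 0, di = 0, used = 0, combined;

	/* Consume the first DMA entry completely. */
	used += dma[di];
	if (used == dma[di]) {
		di++;			/* advance the DMA cursor once */
		combined = 0;
		do {			/* advance the original cursor until it catches up */
			combined += orig[oi++];
		} while (used > combined);
		used = 0;
	}
	printf("dma index %u, orig index %u\n", di, oi);	/* prints 1 and 2 */
	return 0;
}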
@ -64,7 +64,7 @@ static u32 ccp_gen_jobid(struct ccp_device *ccp)
|
|||
static void ccp_sg_free(struct ccp_sg_workarea *wa)
|
||||
{
|
||||
if (wa->dma_count)
|
||||
dma_unmap_sg(wa->dma_dev, wa->dma_sg, wa->nents, wa->dma_dir);
|
||||
dma_unmap_sg(wa->dma_dev, wa->dma_sg_head, wa->nents, wa->dma_dir);
|
||||
|
||||
wa->dma_count = 0;
|
||||
}
|
||||
|
@ -93,6 +93,7 @@ static int ccp_init_sg_workarea(struct ccp_sg_workarea *wa, struct device *dev,
|
|||
return 0;
|
||||
|
||||
wa->dma_sg = sg;
|
||||
wa->dma_sg_head = sg;
|
||||
wa->dma_dev = dev;
|
||||
wa->dma_dir = dma_dir;
|
||||
wa->dma_count = dma_map_sg(dev, sg, wa->nents, dma_dir);
|
||||
|
@ -105,14 +106,28 @@ static int ccp_init_sg_workarea(struct ccp_sg_workarea *wa, struct device *dev,
|
|||
static void ccp_update_sg_workarea(struct ccp_sg_workarea *wa, unsigned int len)
|
||||
{
|
||||
unsigned int nbytes = min_t(u64, len, wa->bytes_left);
|
||||
unsigned int sg_combined_len = 0;
|
||||
|
||||
if (!wa->sg)
|
||||
return;
|
||||
|
||||
wa->sg_used += nbytes;
|
||||
wa->bytes_left -= nbytes;
|
||||
if (wa->sg_used == wa->sg->length) {
|
||||
wa->sg = sg_next(wa->sg);
|
||||
if (wa->sg_used == sg_dma_len(wa->dma_sg)) {
|
||||
/* Advance to the next DMA scatterlist entry */
|
||||
wa->dma_sg = sg_next(wa->dma_sg);
|
||||
|
||||
/* In the case that the DMA mapped scatterlist has entries
|
||||
* that have been merged, the non-DMA mapped scatterlist
|
||||
* must be advanced multiple times for each merged entry.
|
||||
* This ensures that the current non-DMA mapped entry
|
||||
* corresponds to the current DMA mapped entry.
|
||||
*/
|
||||
do {
|
||||
sg_combined_len += wa->sg->length;
|
||||
wa->sg = sg_next(wa->sg);
|
||||
} while (wa->sg_used > sg_combined_len);
|
||||
|
||||
wa->sg_used = 0;
|
||||
}
|
||||
}
|
||||
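
The new comment describes dma_map_sg() coalescing physically contiguous entries, so one DMA-side entry may span several CPU-side entries. A toy illustration of the catch-up loop, over plain arrays with hypothetical lengths rather than real scatterlists:

	/* CPU-side entries of 4 KiB each that the IOMMU merged into one
	 * 12 KiB DMA entry: after consuming the DMA entry, the CPU-side
	 * cursor must skip every entry it covered. */
	static unsigned int skip_merged(const unsigned int *cpu_len,
					unsigned int used)
	{
		unsigned int combined = 0, i = 0;

		do {
			combined += cpu_len[i++];
		} while (used > combined);

		return i; /* cpu_len = {4096, 4096, 4096}, used = 12288 -> 3 */
	}
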
@@ -301,7 +316,7 @@ static unsigned int ccp_queue_buf(struct ccp_data *data, unsigned int from)
 	/* Update the structures and generate the count */
 	buf_count = 0;
 	while (sg_wa->bytes_left && (buf_count < dm_wa->length)) {
-		nbytes = min(sg_wa->sg->length - sg_wa->sg_used,
+		nbytes = min(sg_dma_len(sg_wa->dma_sg) - sg_wa->sg_used,
			     dm_wa->length - buf_count);
		nbytes = min_t(u64, sg_wa->bytes_left, nbytes);

@@ -333,11 +348,11 @@ static void ccp_prepare_data(struct ccp_data *src, struct ccp_data *dst,
 	 * and destination. The resulting len values will always be <= UINT_MAX
 	 * because the dma length is an unsigned int.
 	 */
-	sg_src_len = sg_dma_len(src->sg_wa.sg) - src->sg_wa.sg_used;
+	sg_src_len = sg_dma_len(src->sg_wa.dma_sg) - src->sg_wa.sg_used;
 	sg_src_len = min_t(u64, src->sg_wa.bytes_left, sg_src_len);

 	if (dst) {
-		sg_dst_len = sg_dma_len(dst->sg_wa.sg) - dst->sg_wa.sg_used;
+		sg_dst_len = sg_dma_len(dst->sg_wa.dma_sg) - dst->sg_wa.sg_used;
 		sg_dst_len = min_t(u64, src->sg_wa.bytes_left, sg_dst_len);
 		op_len = min(sg_src_len, sg_dst_len);
 	} else {

@@ -367,7 +382,7 @@ static void ccp_prepare_data(struct ccp_data *src, struct ccp_data *dst,
 		/* Enough data in the sg element, but we need to
 		 * adjust for any previously copied data
 		 */
-		op->src.u.dma.address = sg_dma_address(src->sg_wa.sg);
+		op->src.u.dma.address = sg_dma_address(src->sg_wa.dma_sg);
 		op->src.u.dma.offset = src->sg_wa.sg_used;
 		op->src.u.dma.length = op_len & ~(block_size - 1);

@@ -388,7 +403,7 @@ static void ccp_prepare_data(struct ccp_data *src, struct ccp_data *dst,
 		/* Enough room in the sg element, but we need to
 		 * adjust for any previously used area
 		 */
-		op->dst.u.dma.address = sg_dma_address(dst->sg_wa.sg);
+		op->dst.u.dma.address = sg_dma_address(dst->sg_wa.dma_sg);
 		op->dst.u.dma.offset = dst->sg_wa.sg_used;
 		op->dst.u.dma.length = op->src.u.dma.length;
 	}

@@ -2040,7 +2055,7 @@ static int ccp_run_passthru_cmd(struct ccp_cmd_queue *cmd_q,
 	dst.sg_wa.sg_used = 0;
 	for (i = 1; i <= src.sg_wa.dma_count; i++) {
 		if (!dst.sg_wa.sg ||
-		    (dst.sg_wa.sg->length < src.sg_wa.sg->length)) {
+		    (sg_dma_len(dst.sg_wa.sg) < sg_dma_len(src.sg_wa.sg))) {
 			ret = -EINVAL;
 			goto e_dst;
 		}

@@ -2066,8 +2081,8 @@ static int ccp_run_passthru_cmd(struct ccp_cmd_queue *cmd_q,
 			goto e_dst;
 		}

-		dst.sg_wa.sg_used += src.sg_wa.sg->length;
-		if (dst.sg_wa.sg_used == dst.sg_wa.sg->length) {
+		dst.sg_wa.sg_used += sg_dma_len(src.sg_wa.sg);
+		if (dst.sg_wa.sg_used == sg_dma_len(dst.sg_wa.sg)) {
 			dst.sg_wa.sg = sg_next(dst.sg_wa.sg);
 			dst.sg_wa.sg_used = 0;
 		}
@@ -167,7 +167,6 @@ static int cc_cipher_init(struct crypto_tfm *tfm)
 		skcipher_alg.base);
 	struct device *dev = drvdata_to_dev(cc_alg->drvdata);
 	unsigned int max_key_buf_size = cc_alg->skcipher_alg.max_keysize;
-	int rc = 0;

 	dev_dbg(dev, "Initializing context @%p for %s\n", ctx_p,
 		crypto_tfm_alg_name(tfm));

@@ -179,10 +178,19 @@ static int cc_cipher_init(struct crypto_tfm *tfm)
 	ctx_p->flow_mode = cc_alg->flow_mode;
 	ctx_p->drvdata = cc_alg->drvdata;

+	if (ctx_p->cipher_mode == DRV_CIPHER_ESSIV) {
+		/* Alloc hash tfm for essiv */
+		ctx_p->shash_tfm = crypto_alloc_shash("sha256-generic", 0, 0);
+		if (IS_ERR(ctx_p->shash_tfm)) {
+			dev_err(dev, "Error allocating hash tfm for ESSIV.\n");
+			return PTR_ERR(ctx_p->shash_tfm);
+		}
+	}
+
 	/* Allocate key buffer, cache line aligned */
 	ctx_p->user.key = kmalloc(max_key_buf_size, GFP_KERNEL);
 	if (!ctx_p->user.key)
-		return -ENOMEM;
+		goto free_shash;

 	dev_dbg(dev, "Allocated key buffer in context. key=@%p\n",
 		ctx_p->user.key);

@@ -194,21 +202,19 @@ static int cc_cipher_init(struct crypto_tfm *tfm)
 	if (dma_mapping_error(dev, ctx_p->user.key_dma_addr)) {
 		dev_err(dev, "Mapping Key %u B at va=%pK for DMA failed\n",
 			max_key_buf_size, ctx_p->user.key);
-		return -ENOMEM;
+		goto free_key;
 	}
 	dev_dbg(dev, "Mapped key %u B at va=%pK to dma=%pad\n",
 		max_key_buf_size, ctx_p->user.key, &ctx_p->user.key_dma_addr);

-	if (ctx_p->cipher_mode == DRV_CIPHER_ESSIV) {
-		/* Alloc hash tfm for essiv */
-		ctx_p->shash_tfm = crypto_alloc_shash("sha256-generic", 0, 0);
-		if (IS_ERR(ctx_p->shash_tfm)) {
-			dev_err(dev, "Error allocating hash tfm for ESSIV.\n");
-			return PTR_ERR(ctx_p->shash_tfm);
-		}
-	}
+	return 0;

-	return rc;
+free_key:
+	kfree(ctx_p->user.key);
+free_shash:
+	crypto_free_shash(ctx_p->shash_tfm);
+
+	return -ENOMEM;
 }

 static void cc_cipher_exit(struct crypto_tfm *tfm)
@@ -157,7 +157,8 @@ static int sec_alloc_and_fill_hw_sgl(struct sec_hw_sgl **sec_sgl,
 				     dma_addr_t *psec_sgl,
 				     struct scatterlist *sgl,
 				     int count,
-				     struct sec_dev_info *info)
+				     struct sec_dev_info *info,
+				     gfp_t gfp)
 {
 	struct sec_hw_sgl *sgl_current = NULL;
 	struct sec_hw_sgl *sgl_next;

@@ -172,7 +173,7 @@ static int sec_alloc_and_fill_hw_sgl(struct sec_hw_sgl **sec_sgl,
 		sge_index = i % SEC_MAX_SGE_NUM;
 		if (sge_index == 0) {
 			sgl_next = dma_pool_zalloc(info->hw_sgl_pool,
-						   GFP_KERNEL, &sgl_next_dma);
+						   gfp, &sgl_next_dma);
 			if (!sgl_next) {
 				ret = -ENOMEM;
 				goto err_free_hw_sgls;

@@ -555,14 +556,14 @@ void sec_alg_callback(struct sec_bd_info *resp, void *shadow)
 }

 static int sec_alg_alloc_and_calc_split_sizes(int length, size_t **split_sizes,
-					      int *steps)
+					      int *steps, gfp_t gfp)
 {
 	size_t *sizes;
 	int i;

 	/* Split into suitable sized blocks */
 	*steps = roundup(length, SEC_REQ_LIMIT) / SEC_REQ_LIMIT;
-	sizes = kcalloc(*steps, sizeof(*sizes), GFP_KERNEL);
+	sizes = kcalloc(*steps, sizeof(*sizes), gfp);
 	if (!sizes)
 		return -ENOMEM;

@@ -578,7 +579,7 @@ static int sec_map_and_split_sg(struct scatterlist *sgl, size_t *split_sizes,
 				int steps, struct scatterlist ***splits,
 				int **splits_nents,
 				int sgl_len_in,
-				struct device *dev)
+				struct device *dev, gfp_t gfp)
 {
 	int ret, count;

@@ -586,12 +587,12 @@ static int sec_map_and_split_sg(struct scatterlist *sgl, size_t *split_sizes,
 	if (!count)
 		return -EINVAL;

-	*splits = kcalloc(steps, sizeof(struct scatterlist *), GFP_KERNEL);
+	*splits = kcalloc(steps, sizeof(struct scatterlist *), gfp);
 	if (!*splits) {
 		ret = -ENOMEM;
 		goto err_unmap_sg;
 	}
-	*splits_nents = kcalloc(steps, sizeof(int), GFP_KERNEL);
+	*splits_nents = kcalloc(steps, sizeof(int), gfp);
 	if (!*splits_nents) {
 		ret = -ENOMEM;
 		goto err_free_splits;

@@ -599,7 +600,7 @@ static int sec_map_and_split_sg(struct scatterlist *sgl, size_t *split_sizes,

 	/* output the scatter list before and after this */
 	ret = sg_split(sgl, count, 0, steps, split_sizes,
-		       *splits, *splits_nents, GFP_KERNEL);
+		       *splits, *splits_nents, gfp);
 	if (ret) {
 		ret = -ENOMEM;
 		goto err_free_splits_nents;

@@ -640,13 +641,13 @@ static struct sec_request_el
 			   int el_size, bool different_dest,
 			   struct scatterlist *sgl_in, int n_ents_in,
 			   struct scatterlist *sgl_out, int n_ents_out,
-			   struct sec_dev_info *info)
+			   struct sec_dev_info *info, gfp_t gfp)
 {
 	struct sec_request_el *el;
 	struct sec_bd_info *req;
 	int ret;

-	el = kzalloc(sizeof(*el), GFP_KERNEL);
+	el = kzalloc(sizeof(*el), gfp);
 	if (!el)
 		return ERR_PTR(-ENOMEM);
 	el->el_length = el_size;

@@ -678,7 +679,7 @@ static struct sec_request_el
 	el->sgl_in = sgl_in;

 	ret = sec_alloc_and_fill_hw_sgl(&el->in, &el->dma_in, el->sgl_in,
-					n_ents_in, info);
+					n_ents_in, info, gfp);
 	if (ret)
 		goto err_free_el;

@@ -689,7 +690,7 @@ static struct sec_request_el
 		el->sgl_out = sgl_out;
 		ret = sec_alloc_and_fill_hw_sgl(&el->out, &el->dma_out,
 						el->sgl_out,
-						n_ents_out, info);
+						n_ents_out, info, gfp);
 		if (ret)
 			goto err_free_hw_sgl_in;

@@ -730,6 +731,7 @@ static int sec_alg_skcipher_crypto(struct skcipher_request *skreq,
 	int *splits_out_nents = NULL;
 	struct sec_request_el *el, *temp;
 	bool split = skreq->src != skreq->dst;
+	gfp_t gfp = skreq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : GFP_ATOMIC;

 	mutex_init(&sec_req->lock);
 	sec_req->req_base = &skreq->base;

@@ -738,13 +740,13 @@ static int sec_alg_skcipher_crypto(struct skcipher_request *skreq,
 	sec_req->len_in = sg_nents(skreq->src);

 	ret = sec_alg_alloc_and_calc_split_sizes(skreq->cryptlen, &split_sizes,
-						 &steps);
+						 &steps, gfp);
 	if (ret)
 		return ret;
 	sec_req->num_elements = steps;
 	ret = sec_map_and_split_sg(skreq->src, split_sizes, steps, &splits_in,
 				   &splits_in_nents, sec_req->len_in,
-				   info->dev);
+				   info->dev, gfp);
 	if (ret)
 		goto err_free_split_sizes;

@@ -752,7 +754,7 @@ static int sec_alg_skcipher_crypto(struct skcipher_request *skreq,
 		sec_req->len_out = sg_nents(skreq->dst);
 		ret = sec_map_and_split_sg(skreq->dst, split_sizes, steps,
 					   &splits_out, &splits_out_nents,
-					   sec_req->len_out, info->dev);
+					   sec_req->len_out, info->dev, gfp);
 		if (ret)
 			goto err_unmap_in_sg;
 	}

@@ -785,7 +787,7 @@ static int sec_alg_skcipher_crypto(struct skcipher_request *skreq,
 					       splits_in[i], splits_in_nents[i],
 					       split ? splits_out[i] : NULL,
 					       split ? splits_out_nents[i] : 0,
-					       info);
+					       info, gfp);
 		if (IS_ERR(el)) {
 			ret = PTR_ERR(el);
 			goto err_free_elements;
@@ -332,13 +332,18 @@ static int qat_uclo_create_batch_init_list(struct icp_qat_fw_loader_handle
 	}
 	return 0;
 out_err:
+	/* Do not free the list head unless we allocated it. */
+	tail_old = tail_old->next;
+	if (flag) {
+		kfree(*init_tab_base);
+		*init_tab_base = NULL;
+	}
+
 	while (tail_old) {
 		mem_init = tail_old->next;
 		kfree(tail_old);
 		tail_old = mem_init;
 	}
-	if (flag)
-		kfree(*init_tab_base);
 	return -ENOMEM;
 }
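
The qat fix is the usual shape for unwinding a singly linked list whose head may be owned by the caller: free appended nodes unconditionally, free the head once and only if this function allocated it. A generic sketch with a hypothetical node type, not the qat structures:

	#include <linux/slab.h>

	struct node { struct node *next; };

	/* Free everything appended after *head; free *head itself only
	 * when this code allocated it (owned == true). */
	static void unwind(struct node **head, bool owned)
	{
		struct node *cur = (*head)->next, *next;

		if (owned) {
			kfree(*head);
			*head = NULL;
		}

		while (cur) {
			next = cur->next;
			kfree(cur);
			cur = next;
		}
	}
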
@@ -275,6 +275,7 @@ int edac_device_register_sysfs_main_kobj(struct edac_device_ctl_info *edac_dev)

 	/* Error exit stack */
 err_kobj_reg:
+	kobject_put(&edac_dev->kobj);
 	module_put(edac_dev->owner);

 err_out:

@@ -386,7 +386,7 @@ static int edac_pci_main_kobj_setup(void)

 	/* Error unwind statck */
 kobject_init_and_add_fail:
-	kfree(edac_pci_top_main_kobj);
+	kobject_put(edac_pci_top_main_kobj);

 kzalloc_fail:
 	module_put(THIS_MODULE);
@@ -85,7 +85,10 @@ static int scmi_pm_domain_probe(struct scmi_device *sdev)
 	for (i = 0; i < num_domains; i++, scmi_pd++) {
 		u32 state;

-		domains[i] = &scmi_pd->genpd;
+		if (handle->power_ops->state_get(handle, i, &state)) {
+			dev_warn(dev, "failed to get state for domain %d\n", i);
+			continue;
+		}

 		scmi_pd->domain = i;
 		scmi_pd->handle = handle;

@@ -94,13 +97,10 @@ static int scmi_pm_domain_probe(struct scmi_device *sdev)
 		scmi_pd->genpd.power_off = scmi_pd_power_off;
 		scmi_pd->genpd.power_on = scmi_pd_power_on;

-		if (handle->power_ops->state_get(handle, i, &state)) {
-			dev_warn(dev, "failed to get state for domain %d\n", i);
-			continue;
-		}
-
 		pm_genpd_init(&scmi_pd->genpd, NULL,
 			      state == SCMI_POWER_STATE_GENERIC_OFF);
+
+		domains[i] = &scmi_pd->genpd;
 	}

 	scmi_pd_data->domains = domains;
@@ -402,7 +402,9 @@ int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
 		ring->fence_drv.gpu_addr = adev->uvd.inst[ring->me].gpu_addr + index;
 	}
 	amdgpu_fence_write(ring, atomic_read(&ring->fence_drv.last_seq));
-	amdgpu_irq_get(adev, irq_src, irq_type);
+
+	if (irq_src)
+		amdgpu_irq_get(adev, irq_src, irq_type);

 	ring->fence_drv.irq_src = irq_src;
 	ring->fence_drv.irq_type = irq_type;

@@ -514,8 +516,9 @@ void amdgpu_fence_driver_fini(struct amdgpu_device *adev)
 		/* no need to trigger GPU reset as we are unloading */
 		amdgpu_fence_driver_force_completion(ring);
 	}
-	amdgpu_irq_put(adev, ring->fence_drv.irq_src,
-		       ring->fence_drv.irq_type);
+	if (ring->fence_drv.irq_src)
+		amdgpu_irq_put(adev, ring->fence_drv.irq_src,
+			       ring->fence_drv.irq_type);
 	drm_sched_fini(&ring->sched);
 	del_timer_sync(&ring->fence_drv.fallback_timer);
 	for (j = 0; j <= ring->fence_drv.num_fences_mask; ++j)

@@ -551,8 +554,9 @@ void amdgpu_fence_driver_suspend(struct amdgpu_device *adev)
 		}

 		/* disable the interrupt */
-		amdgpu_irq_put(adev, ring->fence_drv.irq_src,
-			       ring->fence_drv.irq_type);
+		if (ring->fence_drv.irq_src)
+			amdgpu_irq_put(adev, ring->fence_drv.irq_src,
+				       ring->fence_drv.irq_type);
 	}
 }

@@ -578,8 +582,9 @@ void amdgpu_fence_driver_resume(struct amdgpu_device *adev)
 			continue;

 		/* enable the interrupt */
-		amdgpu_irq_get(adev, ring->fence_drv.irq_src,
-			       ring->fence_drv.irq_type);
+		if (ring->fence_drv.irq_src)
+			amdgpu_irq_get(adev, ring->fence_drv.irq_src,
+				       ring->fence_drv.irq_type);
 	}
 }
@@ -500,6 +500,8 @@ bool dm_pp_get_static_clocks(
 			&pp_clk_info);
 	else if (adev->smu.funcs)
 		ret = smu_get_current_clocks(&adev->smu, &pp_clk_info);
+	else
+		return false;
 	if (ret)
 		return false;

@@ -166,7 +166,8 @@ static int smu_v11_0_init_microcode(struct smu_context *smu)
 		chip_name = "vega20";
 		break;
 	default:
-		BUG();
+		dev_err(adev->dev, "Unsupported ASIC type %d\n", adev->asic_type);
+		return -EINVAL;
 	}

 	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_smc.bin", chip_name);
@@ -928,7 +928,7 @@ int malidp_de_planes_init(struct drm_device *drm)
 	const struct malidp_hw_regmap *map = &malidp->dev->hw->map;
 	struct malidp_plane *plane = NULL;
 	enum drm_plane_type plane_type;
-	unsigned long crtcs = 1 << drm->mode_config.num_crtc;
+	unsigned long crtcs = BIT(drm->mode_config.num_crtc);
 	unsigned long flags = DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 | DRM_MODE_ROTATE_180 |
 			      DRM_MODE_ROTATE_270 | DRM_MODE_REFLECT_X | DRM_MODE_REFLECT_Y;
 	unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
@@ -177,7 +177,7 @@ static void sii8620_read_buf(struct sii8620 *ctx, u16 addr, u8 *buf, int len)

 static u8 sii8620_readb(struct sii8620 *ctx, u16 addr)
 {
-	u8 ret;
+	u8 ret = 0;

 	sii8620_read_buf(ctx, addr, &ret, 1);
 	return ret;

@@ -607,6 +607,12 @@ static ssize_t ti_sn_aux_transfer(struct drm_dp_aux *aux,
 				 buf[i]);
 	}

+	/* Clear old status bits before start so we don't get confused */
+	regmap_write(pdata->regmap, SN_AUX_CMD_STATUS_REG,
+		     AUX_IRQ_STATUS_NAT_I2C_FAIL |
+		     AUX_IRQ_STATUS_AUX_RPLY_TOUT |
+		     AUX_IRQ_STATUS_AUX_SHORT);
+
 	regmap_write(pdata->regmap, SN_AUX_CMD_REG, request_val | AUX_CMD_SEND);

 	ret = regmap_read_poll_timeout(pdata->regmap, SN_AUX_CMD_REG, val,
@@ -354,13 +354,13 @@ static ssize_t connector_write(struct file *file, const char __user *ubuf,

 	buf[len] = '\0';

-	if (!strcmp(buf, "on"))
+	if (sysfs_streq(buf, "on"))
 		connector->force = DRM_FORCE_ON;
-	else if (!strcmp(buf, "digital"))
+	else if (sysfs_streq(buf, "digital"))
 		connector->force = DRM_FORCE_ON_DIGITAL;
-	else if (!strcmp(buf, "off"))
+	else if (sysfs_streq(buf, "off"))
 		connector->force = DRM_FORCE_OFF;
-	else if (!strcmp(buf, "unspecified"))
+	else if (sysfs_streq(buf, "unspecified"))
 		connector->force = DRM_FORCE_UNSPECIFIED;
 	else
 		return -EINVAL;
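
Plain strcmp() rejects the trailing newline that `echo on > connector_force` writes, which is why these comparisons move to sysfs_streq(). Its behavior, restated as a standalone sketch that mirrors the kernel helper:

	#include <stdbool.h>

	/* Equal if the strings match exactly, or match up to one trailing
	 * '\n' on either side (what the kernel's sysfs_streq() does). */
	static bool streq_nl(const char *s1, const char *s2)
	{
		while (*s1 && *s1 == *s2) {
			s1++;
			s2++;
		}

		if (*s1 == *s2)
			return true;
		if (!*s1 && *s2 == '\n' && !s2[1])
			return true;
		if (*s1 == '\n' && !s1[1] && !*s2)
			return true;
		return false;
	}

	/* streq_nl("on\n", "on") is true; strcmp("on\n", "on") is not 0. */
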
@@ -703,6 +703,8 @@ int drm_gem_objects_lookup(struct drm_file *filp, void __user *bo_handles,
 	if (!objs)
 		return -ENOMEM;

+	*objs_out = objs;
+
 	handles = kvmalloc_array(count, sizeof(u32), GFP_KERNEL);
 	if (!handles) {
 		ret = -ENOMEM;

@@ -716,8 +718,6 @@ int drm_gem_objects_lookup(struct drm_file *filp, void __user *bo_handles,
 	}

 	ret = objects_lookup(filp, handles, count, objs);
-	*objs_out = objs;

 out:
 	kvfree(handles);
 	return ret;
@@ -1034,11 +1034,11 @@ EXPORT_SYMBOL(mipi_dsi_dcs_set_pixel_format);
  */
 int mipi_dsi_dcs_set_tear_scanline(struct mipi_dsi_device *dsi, u16 scanline)
 {
-	u8 payload[3] = { MIPI_DCS_SET_TEAR_SCANLINE, scanline >> 8,
-			  scanline & 0xff };
+	u8 payload[2] = { scanline >> 8, scanline & 0xff };
 	ssize_t err;

-	err = mipi_dsi_generic_write(dsi, payload, sizeof(payload));
+	err = mipi_dsi_dcs_write(dsi, MIPI_DCS_SET_TEAR_SCANLINE, payload,
+				 sizeof(payload));
 	if (err < 0)
 		return err;
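
The bug here is the write channel, not the bytes: SET_TEAR_SCANLINE is a DCS command, so it must go out as a DCS write, with mipi_dsi_dcs_write() carrying the command byte out-of-band instead of smuggling it into a generic packet. A hypothetical caller, assuming an already-probed dsi device and an illustrative scanline value:

	/* Ask the panel to assert TE at scanline 0x0100 (example value). */
	int ret = mipi_dsi_dcs_set_tear_scanline(dsi, 0x0100);
	if (ret < 0)
		dev_err(&dsi->dev, "failed to set tear scanline: %d\n", ret);
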
@@ -694,7 +694,7 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
 	ret = pm_runtime_get_sync(gpu->dev);
 	if (ret < 0) {
 		dev_err(gpu->dev, "Failed to enable GPU power domain\n");
-		return ret;
+		goto pm_put;
 	}

 	etnaviv_hw_identify(gpu);

@@ -808,6 +808,7 @@ destroy_iommu:
 	gpu->mmu = NULL;
 fail:
 	pm_runtime_mark_last_busy(gpu->dev);
+pm_put:
 	pm_runtime_put_autosuspend(gpu->dev);

 	return ret;

@@ -848,7 +849,7 @@ int etnaviv_gpu_debugfs(struct etnaviv_gpu *gpu, struct seq_file *m)

 	ret = pm_runtime_get_sync(gpu->dev);
 	if (ret < 0)
-		return ret;
+		goto pm_put;

 	dma_lo = gpu_read(gpu, VIVS_FE_DMA_LOW);
 	dma_hi = gpu_read(gpu, VIVS_FE_DMA_HIGH);

@@ -971,6 +972,7 @@ int etnaviv_gpu_debugfs(struct etnaviv_gpu *gpu, struct seq_file *m)
 	ret = 0;

 	pm_runtime_mark_last_busy(gpu->dev);
+pm_put:
 	pm_runtime_put_autosuspend(gpu->dev);

 	return ret;

@@ -984,7 +986,7 @@ void etnaviv_gpu_recover_hang(struct etnaviv_gpu *gpu)
 	dev_err(gpu->dev, "recover hung GPU!\n");

 	if (pm_runtime_get_sync(gpu->dev) < 0)
-		return;
+		goto pm_put;

 	mutex_lock(&gpu->lock);

@@ -1002,6 +1004,7 @@ void etnaviv_gpu_recover_hang(struct etnaviv_gpu *gpu)

 	mutex_unlock(&gpu->lock);
 	pm_runtime_mark_last_busy(gpu->dev);
+pm_put:
 	pm_runtime_put_autosuspend(gpu->dev);
 }

@@ -1274,8 +1277,10 @@ struct dma_fence *etnaviv_gpu_submit(struct etnaviv_gem_submit *submit)

 	if (!submit->runtime_resumed) {
 		ret = pm_runtime_get_sync(gpu->dev);
-		if (ret < 0)
+		if (ret < 0) {
+			pm_runtime_put_noidle(gpu->dev);
 			return NULL;
+		}
 		submit->runtime_resumed = true;
 	}

@@ -1292,6 +1297,7 @@ struct dma_fence *etnaviv_gpu_submit(struct etnaviv_gem_submit *submit)
 	ret = event_alloc(gpu, nr_events, event);
 	if (ret) {
 		DRM_ERROR("no free events\n");
+		pm_runtime_put_noidle(gpu->dev);
 		return NULL;
 	}

@@ -1453,7 +1459,7 @@ static int etnaviv_gpu_clk_enable(struct etnaviv_gpu *gpu)
 	if (gpu->clk_bus) {
 		ret = clk_prepare_enable(gpu->clk_bus);
 		if (ret)
-			return ret;
+			goto disable_clk_reg;
 	}

 	if (gpu->clk_core) {

@@ -1476,6 +1482,9 @@ disable_clk_core:
 disable_clk_bus:
 	if (gpu->clk_bus)
 		clk_disable_unprepare(gpu->clk_bus);
+disable_clk_reg:
+	if (gpu->clk_reg)
+		clk_disable_unprepare(gpu->clk_reg);

 	return ret;
 }
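
Nearly every hunk above is the same fix: pm_runtime_get_sync() increments the device usage counter even when it fails, so every early return must still drop that reference. The canonical shape, as a sketch not tied to any one driver:

	#include <linux/pm_runtime.h>

	static int do_work(struct device *dev)
	{
		int ret;

		ret = pm_runtime_get_sync(dev);
		if (ret < 0) {
			/* get_sync bumped the usage count even on failure */
			pm_runtime_put_noidle(dev);
			return ret;
		}

		/* ...device is powered, do the work... */

		pm_runtime_mark_last_busy(dev);
		pm_runtime_put_autosuspend(dev);
		return 0;
	}
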
@@ -210,9 +210,8 @@ static int dw_hdmi_imx_bind(struct device *dev, struct device *master,
 	if (!pdev->dev.of_node)
 		return -ENODEV;

-	hdmi = devm_kzalloc(&pdev->dev, sizeof(*hdmi), GFP_KERNEL);
-	if (!hdmi)
-		return -ENOMEM;
+	hdmi = dev_get_drvdata(dev);
+	memset(hdmi, 0, sizeof(*hdmi));

 	match = of_match_node(dw_hdmi_imx_dt_ids, pdev->dev.of_node);
 	plat_data = match->data;

@@ -237,8 +236,6 @@ static int dw_hdmi_imx_bind(struct device *dev, struct device *master,
 	drm_encoder_init(drm, encoder, &dw_hdmi_imx_encoder_funcs,
 			 DRM_MODE_ENCODER_TMDS, NULL);

-	platform_set_drvdata(pdev, hdmi);
-
 	hdmi->hdmi = dw_hdmi_bind(pdev, encoder, plat_data);

 	/*

@@ -268,6 +265,14 @@ static const struct component_ops dw_hdmi_imx_ops = {

 static int dw_hdmi_imx_probe(struct platform_device *pdev)
 {
+	struct imx_hdmi *hdmi;
+
+	hdmi = devm_kzalloc(&pdev->dev, sizeof(*hdmi), GFP_KERNEL);
+	if (!hdmi)
+		return -ENOMEM;
+
+	platform_set_drvdata(pdev, hdmi);
+
 	return component_add(&pdev->dev, &dw_hdmi_imx_ops);
 }
@@ -280,9 +280,10 @@ static void imx_drm_unbind(struct device *dev)

 	drm_kms_helper_poll_fini(drm);

-	component_unbind_all(drm->dev, drm);
-
 	drm_mode_config_cleanup(drm);

+	component_unbind_all(drm->dev, drm);
+
 	dev_set_drvdata(dev, NULL);

 	drm_dev_put(drm);
@@ -593,9 +593,8 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data)
 	int ret;
 	int i;

-	imx_ldb = devm_kzalloc(dev, sizeof(*imx_ldb), GFP_KERNEL);
-	if (!imx_ldb)
-		return -ENOMEM;
+	imx_ldb = dev_get_drvdata(dev);
+	memset(imx_ldb, 0, sizeof(*imx_ldb));

 	imx_ldb->regmap = syscon_regmap_lookup_by_phandle(np, "gpr");
 	if (IS_ERR(imx_ldb->regmap)) {

@@ -703,8 +702,6 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data)
 		}
 	}

-	dev_set_drvdata(dev, imx_ldb);
-
 	return 0;

free_child:

@@ -736,6 +733,14 @@ static const struct component_ops imx_ldb_ops = {

 static int imx_ldb_probe(struct platform_device *pdev)
 {
+	struct imx_ldb *imx_ldb;
+
+	imx_ldb = devm_kzalloc(&pdev->dev, sizeof(*imx_ldb), GFP_KERNEL);
+	if (!imx_ldb)
+		return -ENOMEM;
+
+	platform_set_drvdata(pdev, imx_ldb);
+
 	return component_add(&pdev->dev, &imx_ldb_ops);
 }
@@ -490,6 +490,13 @@ static int imx_tve_register(struct drm_device *drm, struct imx_tve *tve)
 	return 0;
 }

+static void imx_tve_disable_regulator(void *data)
+{
+	struct imx_tve *tve = data;
+
+	regulator_disable(tve->dac_reg);
+}
+
 static bool imx_tve_readable_reg(struct device *dev, unsigned int reg)
 {
 	return (reg % 4 == 0) && (reg <= 0xdc);

@@ -542,9 +549,8 @@ static int imx_tve_bind(struct device *dev, struct device *master, void *data)
 	int irq;
 	int ret;

-	tve = devm_kzalloc(dev, sizeof(*tve), GFP_KERNEL);
-	if (!tve)
-		return -ENOMEM;
+	tve = dev_get_drvdata(dev);
+	memset(tve, 0, sizeof(*tve));

 	tve->dev = dev;
 	spin_lock_init(&tve->lock);

@@ -614,6 +620,9 @@ static int imx_tve_bind(struct device *dev, struct device *master, void *data)
 		ret = regulator_enable(tve->dac_reg);
 		if (ret)
 			return ret;
+		ret = devm_add_action_or_reset(dev, imx_tve_disable_regulator, tve);
+		if (ret)
+			return ret;
 	}

 	tve->clk = devm_clk_get(dev, "tve");

@@ -655,27 +664,23 @@ static int imx_tve_bind(struct device *dev, struct device *master, void *data)
 	if (ret)
 		return ret;

-	dev_set_drvdata(dev, tve);
-
 	return 0;
 }

-static void imx_tve_unbind(struct device *dev, struct device *master,
-			   void *data)
-{
-	struct imx_tve *tve = dev_get_drvdata(dev);
-
-	if (!IS_ERR(tve->dac_reg))
-		regulator_disable(tve->dac_reg);
-}
-
 static const struct component_ops imx_tve_ops = {
 	.bind	= imx_tve_bind,
-	.unbind	= imx_tve_unbind,
 };

 static int imx_tve_probe(struct platform_device *pdev)
 {
+	struct imx_tve *tve;
+
+	tve = devm_kzalloc(&pdev->dev, sizeof(*tve), GFP_KERNEL);
+	if (!tve)
+		return -ENOMEM;
+
+	platform_set_drvdata(pdev, tve);
+
 	return component_add(&pdev->dev, &imx_tve_ops);
 }
|
@ -434,21 +434,13 @@ static int ipu_drm_bind(struct device *dev, struct device *master, void *data)
|
|||
struct ipu_client_platformdata *pdata = dev->platform_data;
|
||||
struct drm_device *drm = data;
|
||||
struct ipu_crtc *ipu_crtc;
|
||||
int ret;
|
||||
|
||||
ipu_crtc = devm_kzalloc(dev, sizeof(*ipu_crtc), GFP_KERNEL);
|
||||
if (!ipu_crtc)
|
||||
return -ENOMEM;
|
||||
ipu_crtc = dev_get_drvdata(dev);
|
||||
memset(ipu_crtc, 0, sizeof(*ipu_crtc));
|
||||
|
||||
ipu_crtc->dev = dev;
|
||||
|
||||
ret = ipu_crtc_init(ipu_crtc, pdata, drm);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
dev_set_drvdata(dev, ipu_crtc);
|
||||
|
||||
return 0;
|
||||
return ipu_crtc_init(ipu_crtc, pdata, drm);
|
||||
}
|
||||
|
||||
static void ipu_drm_unbind(struct device *dev, struct device *master,
|
||||
|
@ -470,6 +462,7 @@ static const struct component_ops ipu_crtc_ops = {
|
|||
static int ipu_drm_probe(struct platform_device *pdev)
|
||||
{
|
||||
struct device *dev = &pdev->dev;
|
||||
struct ipu_crtc *ipu_crtc;
|
||||
int ret;
|
||||
|
||||
if (!dev->platform_data)
|
||||
|
@ -479,6 +472,12 @@ static int ipu_drm_probe(struct platform_device *pdev)
|
|||
if (ret)
|
||||
return ret;
|
||||
|
||||
ipu_crtc = devm_kzalloc(dev, sizeof(*ipu_crtc), GFP_KERNEL);
|
||||
if (!ipu_crtc)
|
||||
return -ENOMEM;
|
||||
|
||||
dev_set_drvdata(dev, ipu_crtc);
|
||||
|
||||
return component_add(dev, &ipu_crtc_ops);
|
||||
}
|
||||
|
||||
|
|
|
@@ -205,9 +205,8 @@ static int imx_pd_bind(struct device *dev, struct device *master, void *data)
 	u32 bus_format = 0;
 	const char *fmt;

-	imxpd = devm_kzalloc(dev, sizeof(*imxpd), GFP_KERNEL);
-	if (!imxpd)
-		return -ENOMEM;
+	imxpd = dev_get_drvdata(dev);
+	memset(imxpd, 0, sizeof(*imxpd));

 	edidp = of_get_property(np, "edid", &imxpd->edid_len);
 	if (edidp)

@@ -237,8 +236,6 @@ static int imx_pd_bind(struct device *dev, struct device *master, void *data)
 	if (ret)
 		return ret;

-	dev_set_drvdata(dev, imxpd);
-
 	return 0;
 }

@@ -260,6 +257,14 @@ static const struct component_ops imx_pd_ops = {

 static int imx_pd_probe(struct platform_device *pdev)
 {
+	struct imx_parallel_display *imxpd;
+
+	imxpd = devm_kzalloc(&pdev->dev, sizeof(*imxpd), GFP_KERNEL);
+	if (!imxpd)
+		return -ENOMEM;
+
+	platform_set_drvdata(pdev, imxpd);
+
 	return component_add(&pdev->dev, &imx_pd_ops);
 }
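
The four imx-drm hunks above share one refactor: allocate the driver state once in probe() with devm, stash it in drvdata, and have the component bind() callback only (re)initialize it, so repeated bind/unbind cycles cannot leave stale pointers behind. The skeleton, with hypothetical names:

	#include <linux/component.h>
	#include <linux/platform_device.h>

	struct foo { /* ...driver state... */ };

	static int foo_bind(struct device *dev, struct device *master, void *data)
	{
		struct foo *foo = dev_get_drvdata(dev); /* allocated in probe */

		memset(foo, 0, sizeof(*foo));	/* fresh state on every bind */
		/* ...register encoders/connectors... */
		return 0;
	}

	static const struct component_ops foo_ops = {
		.bind = foo_bind,
	};

	static int foo_probe(struct platform_device *pdev)
	{
		struct foo *foo = devm_kzalloc(&pdev->dev, sizeof(*foo), GFP_KERNEL);

		if (!foo)
			return -ENOMEM;
		platform_set_drvdata(pdev, foo);
		return component_add(&pdev->dev, &foo_ops);
	}
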
@@ -713,10 +713,19 @@ int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu)
 	/* Turn on the resources */
 	pm_runtime_get_sync(gmu->dev);

+	/*
+	 * "enable" the GX power domain which won't actually do anything but it
+	 * will make sure that the refcounting is correct in case we need to
+	 * bring down the GX after a GMU failure
+	 */
+	if (!IS_ERR_OR_NULL(gmu->gxpd))
+		pm_runtime_get_sync(gmu->gxpd);
+
 	/* Use a known rate to bring up the GMU */
 	clk_set_rate(gmu->core_clk, 200000000);
 	ret = clk_bulk_prepare_enable(gmu->nr_clocks, gmu->clocks);
 	if (ret) {
+		pm_runtime_put(gmu->gxpd);
 		pm_runtime_put(gmu->dev);
 		return ret;
 	}

@@ -752,19 +761,12 @@ int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu)
 	/* Set the GPU to the highest power frequency */
 	__a6xx_gmu_set_freq(gmu, gmu->nr_gpu_freqs - 1);

-	/*
-	 * "enable" the GX power domain which won't actually do anything but it
-	 * will make sure that the refcounting is correct in case we need to
-	 * bring down the GX after a GMU failure
-	 */
-	if (!IS_ERR_OR_NULL(gmu->gxpd))
-		pm_runtime_get(gmu->gxpd);
-
 out:
 	/* On failure, shut down the GMU to leave it in a good state */
 	if (ret) {
 		disable_irq(gmu->gmu_irq);
 		a6xx_rpmh_stop(gmu);
+		pm_runtime_put(gmu->gxpd);
 		pm_runtime_put(gmu->dev);
 	}
@@ -397,7 +397,7 @@ static void dpu_crtc_frame_event_cb(void *data, u32 event)
 	spin_unlock_irqrestore(&dpu_crtc->spin_lock, flags);

 	if (!fevent) {
-		DRM_ERROR("crtc%d event %d overflow\n", crtc->base.id, event);
+		DRM_ERROR_RATELIMITED("crtc%d event %d overflow\n", crtc->base.id, event);
 		return;
 	}
@@ -937,10 +937,8 @@ int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
 static int msm_gem_new_impl(struct drm_device *dev,
 		uint32_t size, uint32_t flags,
 		struct reservation_object *resv,
-		struct drm_gem_object **obj,
-		bool struct_mutex_locked)
+		struct drm_gem_object **obj)
 {
 	struct msm_drm_private *priv = dev->dev_private;
 	struct msm_gem_object *msm_obj;

 	switch (flags & MSM_BO_CACHE_MASK) {

@@ -969,15 +967,6 @@ static int msm_gem_new_impl(struct drm_device *dev,
 	INIT_LIST_HEAD(&msm_obj->submit_entry);
 	INIT_LIST_HEAD(&msm_obj->vmas);

-	if (struct_mutex_locked) {
-		WARN_ON(!mutex_is_locked(&dev->struct_mutex));
-		list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
-	} else {
-		mutex_lock(&dev->struct_mutex);
-		list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
-		mutex_unlock(&dev->struct_mutex);
-	}
-
 	*obj = &msm_obj->base;

 	return 0;

@@ -987,6 +976,7 @@ static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
 		uint32_t size, uint32_t flags, bool struct_mutex_locked)
 {
 	struct msm_drm_private *priv = dev->dev_private;
+	struct msm_gem_object *msm_obj;
 	struct drm_gem_object *obj = NULL;
 	bool use_vram = false;
 	int ret;

@@ -1007,14 +997,15 @@ static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
 	if (size == 0)
 		return ERR_PTR(-EINVAL);

-	ret = msm_gem_new_impl(dev, size, flags, NULL, &obj, struct_mutex_locked);
+	ret = msm_gem_new_impl(dev, size, flags, NULL, &obj);
 	if (ret)
 		goto fail;

+	msm_obj = to_msm_bo(obj);
+
 	if (use_vram) {
 		struct msm_gem_vma *vma;
 		struct page **pages;
-		struct msm_gem_object *msm_obj = to_msm_bo(obj);

 		mutex_lock(&msm_obj->lock);

@@ -1049,6 +1040,15 @@ static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
 		mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER);
 	}

+	if (struct_mutex_locked) {
+		WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+		list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
+	} else {
+		mutex_lock(&dev->struct_mutex);
+		list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
+		mutex_unlock(&dev->struct_mutex);
+	}
+
 	return obj;

 fail:

@@ -1071,6 +1071,7 @@ struct drm_gem_object *msm_gem_new(struct drm_device *dev,
 struct drm_gem_object *msm_gem_import(struct drm_device *dev,
 		struct dma_buf *dmabuf, struct sg_table *sgt)
 {
+	struct msm_drm_private *priv = dev->dev_private;
 	struct msm_gem_object *msm_obj;
 	struct drm_gem_object *obj;
 	uint32_t size;

@@ -1084,7 +1085,7 @@ struct drm_gem_object *msm_gem_import(struct drm_device *dev,

 	size = PAGE_ALIGN(dmabuf->size);

-	ret = msm_gem_new_impl(dev, size, MSM_BO_WC, dmabuf->resv, &obj, false);
+	ret = msm_gem_new_impl(dev, size, MSM_BO_WC, dmabuf->resv, &obj);
 	if (ret)
 		goto fail;

@@ -1109,6 +1110,11 @@ struct drm_gem_object *msm_gem_import(struct drm_device *dev,
 	}

 	mutex_unlock(&msm_obj->lock);

+	mutex_lock(&dev->struct_mutex);
+	list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
+	mutex_unlock(&dev->struct_mutex);
+
 	return obj;

 fail:
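
The msm_gem change reads as an initialize-then-publish fix: the object used to be added to the shared inactive_list inside msm_gem_new_impl(), before the rest of its state was set up, so anything walking that list could observe a half-built object. The safe ordering, sketched with hypothetical types rather than the msm structures:

	#include <linux/list.h>
	#include <linux/mutex.h>
	#include <linux/slab.h>

	struct obj {
		struct mutex lock;
		struct list_head vmas;
		struct list_head node;
	};

	static DEFINE_MUTEX(shared_lock);
	static LIST_HEAD(shared_list);

	static int publish_obj(void)
	{
		struct obj *o = kzalloc(sizeof(*o), GFP_KERNEL);

		if (!o)
			return -ENOMEM;

		mutex_init(&o->lock);
		INIT_LIST_HEAD(&o->vmas);
		/* ...finish every field before anyone else can see it... */

		mutex_lock(&shared_lock);
		list_add_tail(&o->node, &shared_list); /* publish last */
		mutex_unlock(&shared_lock);
		return 0;
	}
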
@@ -83,18 +83,20 @@ nv50_head_atomic_check_dither(struct nv50_head_atom *armh,
 {
 	u32 mode = 0x00;

-	if (asyc->dither.mode == DITHERING_MODE_AUTO) {
-		if (asyh->base.depth > asyh->or.bpc * 3)
-			mode = DITHERING_MODE_DYNAMIC2X2;
-	} else {
-		mode = asyc->dither.mode;
-	}
+	if (asyc->dither.mode) {
+		if (asyc->dither.mode == DITHERING_MODE_AUTO) {
+			if (asyh->base.depth > asyh->or.bpc * 3)
+				mode = DITHERING_MODE_DYNAMIC2X2;
+		} else {
+			mode = asyc->dither.mode;
+		}

-	if (asyc->dither.depth == DITHERING_DEPTH_AUTO) {
-		if (asyh->or.bpc >= 8)
-			mode |= DITHERING_DEPTH_8BPC;
-	} else {
-		mode |= asyc->dither.depth;
+		if (asyc->dither.depth == DITHERING_DEPTH_AUTO) {
+			if (asyh->or.bpc >= 8)
+				mode |= DITHERING_DEPTH_8BPC;
+		} else {
+			mode |= asyc->dither.depth;
+		}
 	}

 	asyh->dither.enable = mode;
@@ -54,8 +54,10 @@ nouveau_debugfs_strap_peek(struct seq_file *m, void *data)
 	int ret;

 	ret = pm_runtime_get_sync(drm->dev->dev);
-	if (ret < 0 && ret != -EACCES)
+	if (ret < 0 && ret != -EACCES) {
+		pm_runtime_put_autosuspend(drm->dev->dev);
 		return ret;
+	}

 	seq_printf(m, "0x%08x\n",
 		   nvif_rd32(&drm->client.device.object, 0x101000));

@@ -1050,8 +1050,10 @@ nouveau_drm_open(struct drm_device *dev, struct drm_file *fpriv)

 	/* need to bring up power immediately if opening device */
 	ret = pm_runtime_get_sync(dev->dev);
-	if (ret < 0 && ret != -EACCES)
+	if (ret < 0 && ret != -EACCES) {
+		pm_runtime_put_autosuspend(dev->dev);
 		return ret;
+	}

 	get_task_comm(tmpname, current);
 	snprintf(name, sizeof(name), "%s[%d]", tmpname, pid_nr(fpriv->pid));

@@ -1133,8 +1135,10 @@ nouveau_drm_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 	long ret;

 	ret = pm_runtime_get_sync(dev->dev);
-	if (ret < 0 && ret != -EACCES)
+	if (ret < 0 && ret != -EACCES) {
+		pm_runtime_put_autosuspend(dev->dev);
 		return ret;
+	}

 	switch (_IOC_NR(cmd) - DRM_COMMAND_BASE) {
 	case DRM_NOUVEAU_NVIF:

@@ -45,8 +45,10 @@ nouveau_gem_object_del(struct drm_gem_object *gem)
 	int ret;

 	ret = pm_runtime_get_sync(dev);
-	if (WARN_ON(ret < 0 && ret != -EACCES))
+	if (WARN_ON(ret < 0 && ret != -EACCES)) {
+		pm_runtime_put_autosuspend(dev);
 		return;
+	}

 	if (gem->import_attach)
 		drm_prime_gem_destroy(gem, nvbo->bo.sg);
@@ -96,12 +96,9 @@ nouveau_sgdma_create_ttm(struct ttm_buffer_object *bo, uint32_t page_flags)
 	else
 		nvbe->ttm.ttm.func = &nv50_sgdma_backend;

-	if (ttm_dma_tt_init(&nvbe->ttm, bo, page_flags))
-		/*
-		 * A failing ttm_dma_tt_init() will call ttm_tt_destroy()
-		 * and thus our nouveau_sgdma_destroy() hook, so we don't need
-		 * to free nvbe here.
-		 */
+	if (ttm_dma_tt_init(&nvbe->ttm, bo, page_flags)) {
+		kfree(nvbe);
 		return NULL;
+	}
 	return &nvbe->ttm.ttm;
 }
@@ -1663,7 +1663,7 @@ static const struct drm_display_mode lg_lb070wv8_mode = {
 static const struct panel_desc lg_lb070wv8 = {
 	.modes = &lg_lb070wv8_mode,
 	.num_modes = 1,
-	.bpc = 16,
+	.bpc = 8,
 	.size = {
 		.width = 151,
 		.height = 91,
@@ -4364,7 +4364,7 @@ static int ci_set_mc_special_registers(struct radeon_device *rdev,
 					table->mc_reg_table_entry[k].mc_data[j] |= 0x100;
 			}
 			j++;
-			if (j > SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
+			if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
 				return -EINVAL;

 			if (!pi->mem_gddr5) {
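
This is a textbook off-by-one: j will be used as an array index again after the increment, so the valid range is 0..SIZE-1 and the guard must also reject j == SIZE. In miniature, with a hypothetical array size:

	#include <linux/errno.h>

	#define N 16

	static int store(int *a, int j)
	{
		j++;
		if (j >= N)	/* 'j > N' would still allow a[N], one past the end */
			return -EINVAL;
		a[j] = 0;	/* safe: j is at most N - 1 */
		return 0;
	}
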
@@ -626,8 +626,10 @@ radeon_crtc_set_config(struct drm_mode_set *set,
 	dev = set->crtc->dev;

 	ret = pm_runtime_get_sync(dev->dev);
-	if (ret < 0)
+	if (ret < 0) {
+		pm_runtime_put_autosuspend(dev->dev);
 		return ret;
+	}

 	ret = drm_crtc_helper_set_config(set, ctx);
@@ -170,12 +170,7 @@ int radeon_no_wb;
 int radeon_modeset = -1;
 int radeon_dynclks = -1;
 int radeon_r4xx_atom = 0;
-#ifdef __powerpc__
-/* Default to PCI on PowerPC (fdo #95017) */
-int radeon_agpmode = -1;
-#else
 int radeon_agpmode = 0;
-#endif
 int radeon_vram_limit = 0;
 int radeon_gart_size = -1; /* auto */
 int radeon_benchmarking = 0;

@@ -542,8 +537,10 @@ long radeon_drm_ioctl(struct file *filp,
 	long ret;
 	dev = file_priv->minor->dev;
 	ret = pm_runtime_get_sync(dev->dev);
-	if (ret < 0)
+	if (ret < 0) {
+		pm_runtime_put_autosuspend(dev->dev);
 		return ret;
+	}

 	ret = drm_ioctl(filp, cmd, arg);
@@ -633,8 +633,10 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
 	file_priv->driver_priv = NULL;

 	r = pm_runtime_get_sync(dev->dev);
-	if (r < 0)
+	if (r < 0) {
+		pm_runtime_put_autosuspend(dev->dev);
 		return r;
+	}

 	/* new gpu have virtual address space support */
 	if (rdev->family >= CHIP_CAYMAN) {
|
|||
int i;
|
||||
|
||||
for (i = 0; i < timings->num_timings; i++) {
|
||||
struct drm_display_mode *mode = drm_mode_create(dev);
|
||||
struct drm_display_mode *mode;
|
||||
struct videomode vm;
|
||||
|
||||
if (videomode_from_timings(timings, &vm, i))
|
||||
break;
|
||||
|
||||
mode = drm_mode_create(dev);
|
||||
if (!mode)
|
||||
break;
|
||||
|
||||
drm_display_mode_from_videomode(&vm, mode);
|
||||
|
||||
mode->type = DRM_MODE_TYPE_DRIVER;
|
||||
|
|
|
@ -241,7 +241,6 @@ int ttm_tt_init(struct ttm_tt *ttm, struct ttm_buffer_object *bo,
|
|||
ttm_tt_init_fields(ttm, bo, page_flags);
|
||||
|
||||
if (ttm_tt_alloc_page_directory(ttm)) {
|
||||
ttm_tt_destroy(ttm);
|
||||
pr_err("Failed allocating page table\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
@ -265,7 +264,6 @@ int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_buffer_object *bo,
|
|||
|
||||
INIT_LIST_HEAD(&ttm_dma->pages_list);
|
||||
if (ttm_dma_tt_alloc_page_directory(ttm_dma)) {
|
||||
ttm_tt_destroy(ttm);
|
||||
pr_err("Failed allocating page table\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
@ -287,7 +285,6 @@ int ttm_sg_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_buffer_object *bo,
|
|||
else
|
||||
ret = ttm_dma_tt_alloc_page_directory(ttm_dma);
|
||||
if (ret) {
|
||||
ttm_tt_destroy(ttm);
|
||||
pr_err("Failed allocating page table\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
|
|
@ -16,6 +16,8 @@
|
|||
#include "debug.h"
|
||||
#include "channel.h"
|
||||
|
||||
static DEFINE_MUTEX(debug_lock);
|
||||
|
||||
unsigned int host1x_debug_trace_cmdbuf;
|
||||
|
||||
static pid_t host1x_debug_force_timeout_pid;
|
||||
|
@ -52,12 +54,14 @@ static int show_channel(struct host1x_channel *ch, void *data, bool show_fifo)
|
|||
struct output *o = data;
|
||||
|
||||
mutex_lock(&ch->cdma.lock);
|
||||
mutex_lock(&debug_lock);
|
||||
|
||||
if (show_fifo)
|
||||
host1x_hw_show_channel_fifo(m, ch, o);
|
||||
|
||||
host1x_hw_show_channel_cdma(m, ch, o);
|
||||
|
||||
mutex_unlock(&debug_lock);
|
||||
mutex_unlock(&ch->cdma.lock);
|
||||
|
||||
return 0;
|
||||
|
|
|
@ -350,13 +350,13 @@ static int hidinput_query_battery_capacity(struct hid_device *dev)
|
|||
u8 *buf;
|
||||
int ret;
|
||||
|
||||
buf = kmalloc(2, GFP_KERNEL);
|
||||
buf = kmalloc(4, GFP_KERNEL);
|
||||
if (!buf)
|
||||
return -ENOMEM;
|
||||
|
||||
ret = hid_hw_raw_request(dev, dev->battery_report_id, buf, 2,
|
||||
ret = hid_hw_raw_request(dev, dev->battery_report_id, buf, 4,
|
||||
dev->battery_report_type, HID_REQ_GET_REPORT);
|
||||
if (ret != 2) {
|
||||
if (ret < 2) {
|
||||
kfree(buf);
|
||||
return -ENODATA;
|
||||
}
|
||||
|
|
|
@ -635,15 +635,14 @@ int tmc_read_unprepare_etb(struct tmc_drvdata *drvdata)
|
|||
|
||||
spin_lock_irqsave(&drvdata->spinlock, flags);
|
||||
|
||||
/* There is no point in reading a TMC in HW FIFO mode */
|
||||
mode = readl_relaxed(drvdata->base + TMC_MODE);
|
||||
if (mode != TMC_MODE_CIRCULAR_BUFFER) {
|
||||
spin_unlock_irqrestore(&drvdata->spinlock, flags);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/* Re-enable the TMC if need be */
|
||||
if (drvdata->mode == CS_MODE_SYSFS) {
|
||||
/* There is no point in reading a TMC in HW FIFO mode */
|
||||
mode = readl_relaxed(drvdata->base + TMC_MODE);
|
||||
if (mode != TMC_MODE_CIRCULAR_BUFFER) {
|
||||
spin_unlock_irqrestore(&drvdata->spinlock, flags);
|
||||
return -EINVAL;
|
||||
}
|
||||
/*
|
||||
* The trace run will continue with the same allocated trace
|
||||
* buffer. As such zero-out the buffer so that we don't end
|
||||
|
|
|
@ -1315,6 +1315,10 @@ out:
|
|||
return ret;
|
||||
}
|
||||
|
||||
static void prevent_dealloc_device(struct ib_device *ib_dev)
|
||||
{
|
||||
}
|
||||
|
||||
/**
|
||||
* ib_register_device - Register an IB device with IB core
|
||||
* @device:Device to register
|
||||
|
@ -1380,11 +1384,11 @@ int ib_register_device(struct ib_device *device, const char *name)
|
|||
* possibility for a parallel unregistration along with this
|
||||
* error flow. Since we have a refcount here we know any
|
||||
* parallel flow is stopped in disable_device and will see the
|
||||
* NULL pointers, causing the responsibility to
|
||||
* special dealloc_driver pointer, causing the responsibility to
|
||||
* ib_dealloc_device() to revert back to this thread.
|
||||
*/
|
||||
dealloc_fn = device->ops.dealloc_driver;
|
||||
device->ops.dealloc_driver = NULL;
|
||||
device->ops.dealloc_driver = prevent_dealloc_device;
|
||||
ib_device_put(device);
|
||||
__ib_unregister_device(device);
|
||||
device->ops.dealloc_driver = dealloc_fn;
|
||||
|
@ -1432,7 +1436,8 @@ static void __ib_unregister_device(struct ib_device *ib_dev)
|
|||
* Drivers using the new flow may not call ib_dealloc_device except
|
||||
* in error unwind prior to registration success.
|
||||
*/
|
||||
if (ib_dev->ops.dealloc_driver) {
|
||||
if (ib_dev->ops.dealloc_driver &&
|
||||
ib_dev->ops.dealloc_driver != prevent_dealloc_device) {
|
||||
WARN_ON(kref_read(&ib_dev->dev.kobj.kref) <= 1);
|
||||
ib_dealloc_device(ib_dev);
|
||||
}
|
||||
|
|
|
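
Rather than NULLing the callback, which other code treats as "driver never provided one", the fix swaps in a no-op sentinel so the two states stay distinguishable. The pattern in general form, with a hypothetical ops struct:

	struct ops { void (*teardown)(void *ctx); };

	static void teardown_blocked(void *ctx)
	{
		/* no-op sentinel: distinct from NULL and from any real callback */
	}

	/* Temporarily block teardown without losing the "driver provided a
	 * callback" information that overwriting with NULL would destroy. */
	static void run_guarded(struct ops *ops, void *ctx)
	{
		void (*saved)(void *) = ops->teardown;

		ops->teardown = teardown_blocked;
		/* ...window where concurrent code must not tear down... */
		ops->teardown = saved;
	}
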
@@ -1641,7 +1641,7 @@ static int _ib_modify_qp(struct ib_qp *qp, struct ib_qp_attr *attr,
 		if (!(rdma_protocol_ib(qp->device,
 				       attr->alt_ah_attr.port_num) &&
 		      rdma_protocol_ib(qp->device, port))) {
-			ret = EINVAL;
+			ret = -EINVAL;
 			goto out;
 		}
 	}