Mirror of git://git.yoctoproject.org/linux-yocto.git (synced 2025-10-22 15:03:53 +02:00)

commit 6b56dc2c00
Merge branch 'v6.6/standard/base' into v6.6/standard/preempt-rt/base

@@ -380,7 +380,9 @@ entry, ts0, corresponding to the ts0 variable in the sched_waking
 trigger above.

 sched_waking histogram
-----------------------::
+----------------------
+
+.. code-block::

     +------------------+
     | hist_data        |<-------------------------------------------------------+

Makefile

@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 6
 PATCHLEVEL = 6
-SUBLEVEL = 111
+SUBLEVEL = 112
 EXTRAVERSION =
 NAME = Pinguïn Aangedreven

@@ -266,7 +266,7 @@
 	};

 	can0_pins: can0 {
-		groups = "can0_data";
+		groups = "can0_data_b";
 		function = "can0";
 	};

@@ -270,7 +270,7 @@
 	vcc7-supply = <&vbat>;
 	vccio-supply = <&vbat>;

-	ti,en-ck32k-xtal = <1>;
+	ti,en-ck32k-xtal;

 	regulators {
 		vrtc_reg: regulator@0 {

@@ -483,8 +483,6 @@ status = "okay";

 	op-mode = <0>;		/* MCASP_IIS_MODE */
 	tdm-slots = <2>;
-	/* 16 serializers */
-	num-serializer = <16>;
 	serial-dir = <	/* 0: INACTIVE, 1: TX, 2: RX */
 		0 0 2 1 0 0 0 0 0 0 0 0 0 0 0 0
 	>;

@@ -65,7 +65,7 @@
 	ti,debounce-max = /bits/ 16 <10>;
 	ti,debounce-tol = /bits/ 16 <5>;
 	ti,debounce-rep = /bits/ 16 <1>;
-	ti,keep-vref-on = <1>;
+	ti,keep-vref-on;
 	ti,settle-delay-usec = /bits/ 16 <150>;

 	wakeup-source;

@@ -872,7 +872,7 @@ e_done:
 /**
  * at91_mckx_ps_restore: restore MCK1..4 settings
  *
- * Side effects: overwrites tmp1, tmp2
+ * Side effects: overwrites tmp1, tmp2 and tmp3
  */
 .macro at91_mckx_ps_restore
 #ifdef CONFIG_SOC_SAMA7

@@ -916,7 +916,7 @@ r_ps:
 	bic	tmp3, tmp3, #AT91_PMC_MCR_V2_ID_MSK
 	orr	tmp3, tmp3, tmp1
 	orr	tmp3, tmp3, #AT91_PMC_MCR_V2_CMD
-	str	tmp2, [pmc, #AT91_PMC_MCR_V2]
+	str	tmp3, [pmc, #AT91_PMC_MCR_V2]

 	wait_mckrdy tmp1

@@ -21,6 +21,14 @@
 	};
 };

+/*
+ * Adjust pcie0's iommu-map to account for the disabled port01.
+ */
+&pcie0 {
+	iommu-map = <0x100 &pcie0_dart_0 1 1>,
+		    <0x200 &pcie0_dart_2 1 1>;
+};
+
 &bluetooth0 {
 	brcm,board-type = "apple,santorini";
 };

@@ -36,10 +44,10 @@
  */

 &port02 {
-	bus-range = <3 3>;
+	bus-range = <2 2>;
 	status = "okay";
 	ethernet0: ethernet@0,0 {
-		reg = <0x30000 0x0 0x0 0x0 0x0>;
+		reg = <0x20000 0x0 0x0 0x0 0x0>;
 		/* To be filled by the loader */
 		local-mac-address = [00 10 18 00 00 00];
 	};

@@ -6,12 +6,12 @@
 #include <dt-bindings/input/input.h>

 &pwrap {
-	pmic: mt6331 {
+	pmic: pmic {
 		compatible = "mediatek,mt6331";
 		interrupt-controller;
 		#interrupt-cells = <2>;

-		mt6331regulator: mt6331regulator {
+		mt6331regulator: regulators {
 			compatible = "mediatek,mt6331-regulator";

 			mt6331_vdvfs11_reg: buck-vdvfs11 {

@@ -258,7 +258,7 @@
 			};

 			mt6331_vdig18_reg: ldo-vdig18 {
-				regulator-name = "dvdd18_dig";
+				regulator-name = "vdig18";
 				regulator-min-microvolt = <1800000>;
 				regulator-max-microvolt = <1800000>;
 				regulator-ramp-delay = <0>;

@@ -266,11 +266,11 @@
 			};
 		};

-		mt6331rtc: mt6331rtc {
+		mt6331rtc: rtc {
 			compatible = "mediatek,mt6331-rtc";
 		};

-		mt6331keys: mt6331keys {
+		mt6331keys: keys {
 			compatible = "mediatek,mt6331-keys";
 			power {
 				linux,keycodes = <KEY_POWER>;

@@ -136,7 +136,7 @@

 &mmc0 {
 	/* eMMC controller */
-	mediatek,latch-ck = <0x14>; /* hs400 */
+	mediatek,latch-ck = <4>; /* hs400 */
 	mediatek,hs200-cmd-int-delay = <1>;
 	mediatek,hs400-cmd-int-delay = <1>;
 	mediatek,hs400-ds-dly3 = <0x1a>;

@@ -1524,9 +1524,6 @@

 	power-domains = <&spm MT8195_POWER_DOMAIN_PCIE_MAC_P0>;

-	resets = <&infracfg_ao MT8195_INFRA_RST2_PCIE_P0_SWRST>;
-	reset-names = "mac";
-
 	#interrupt-cells = <1>;
 	interrupt-map-mask = <0 0 0 7>;
 	interrupt-map = <0 0 0 1 &pcie_intc0 0>,

@@ -11,7 +11,7 @@

 / {
 	model = "Pumpkin MT8516";
-	compatible = "mediatek,mt8516";
+	compatible = "mediatek,mt8516-pumpkin", "mediatek,mt8516";

 	memory@40000000 {
 		device_type = "memory";

@@ -1107,6 +1107,7 @@
 	snps,has-lpm-erratum;
 	snps,hird-threshold = /bits/ 8 <0x10>;
 	snps,usb3_lpm_capable;
+	snps,parkmode-disable-ss-quirk;
 	maximum-speed = "super-speed";
 	dr_mode = "otg";
 };

@@ -38,7 +38,10 @@
 #if (SW_SCIF_CAN || SW_RSPI_CAN)
 &canfd {
 	pinctrl-0 = <&can1_pins>;
-	/delete-node/ channel@0;
+
+	channel0 {
+		status = "disabled";
+	};
 };
 #else
 &canfd {

@@ -124,6 +124,10 @@ static inline __init bool kaslr_disabled(void)
 	if (str == boot_command_line || (str > boot_command_line && *(str - 1) == ' '))
 		return true;

+	str = strstr(boot_command_line, "kexec_file");
+	if (str == boot_command_line || (str > boot_command_line && *(str - 1) == ' '))
+		return true;
+
 	return false;
 }

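The boundary test above accepts the parameter only when it starts the command line or follows a space, so substrings of other parameters ("nokexec_file") do not match. A minimal userspace sketch of the same check (the helper name is mine, not the kernel's):

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    /* Token matches only at a word boundary: start of string or after ' '. */
    static bool cmdline_has_word(const char *cmdline, const char *word)
    {
        const char *str = strstr(cmdline, word);

        return str && (str == cmdline || str[-1] == ' ');
    }

    int main(void)
    {
        printf("%d\n", cmdline_has_word("root=/dev/sda kexec_file", "kexec_file")); /* 1 */
        printf("%d\n", cmdline_has_word("nokexec_file", "kexec_file"));             /* 0 */
        return 0;
    }
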
@@ -696,16 +696,16 @@ FUNC_NAME:
 	EX_LD_FP(LOAD(ldd, %o4+40, %f26), memcpy_retl_o2_plus_o5_plus_40)
 	faligndata %f24, %f26, %f10
 	EX_ST_FP(STORE(std, %f6, %o0+24), memcpy_retl_o2_plus_o5_plus_40)
-	EX_LD_FP(LOAD(ldd, %o4+48, %f28), memcpy_retl_o2_plus_o5_plus_40)
+	EX_LD_FP(LOAD(ldd, %o4+48, %f28), memcpy_retl_o2_plus_o5_plus_32)
 	faligndata %f26, %f28, %f12
-	EX_ST_FP(STORE(std, %f8, %o0+32), memcpy_retl_o2_plus_o5_plus_40)
+	EX_ST_FP(STORE(std, %f8, %o0+32), memcpy_retl_o2_plus_o5_plus_32)
 	add	%o4, 64, %o4
-	EX_LD_FP(LOAD(ldd, %o4-8, %f30), memcpy_retl_o2_plus_o5_plus_40)
+	EX_LD_FP(LOAD(ldd, %o4-8, %f30), memcpy_retl_o2_plus_o5_plus_24)
 	faligndata %f28, %f30, %f14
-	EX_ST_FP(STORE(std, %f10, %o0+40), memcpy_retl_o2_plus_o5_plus_40)
-	EX_ST_FP(STORE(std, %f12, %o0+48), memcpy_retl_o2_plus_o5_plus_40)
+	EX_ST_FP(STORE(std, %f10, %o0+40), memcpy_retl_o2_plus_o5_plus_24)
+	EX_ST_FP(STORE(std, %f12, %o0+48), memcpy_retl_o2_plus_o5_plus_16)
 	add	%o0, 64, %o0
-	EX_ST_FP(STORE(std, %f14, %o0-8), memcpy_retl_o2_plus_o5_plus_40)
+	EX_ST_FP(STORE(std, %f14, %o0-8), memcpy_retl_o2_plus_o5_plus_8)
 	fsrc2	%f30, %f14
 	bgu,pt	%xcc, .Lunalign_sloop
 	 prefetch [%o4 + (8 * BLOCK_SIZE)], 20

@@ -728,7 +728,7 @@ FUNC_NAME:
 	add	%o4, 8, %o4
 	faligndata %f0, %f2, %f16
 	subcc	%o5, 8, %o5
-	EX_ST_FP(STORE(std, %f16, %o0), memcpy_retl_o2_plus_o5)
+	EX_ST_FP(STORE(std, %f16, %o0), memcpy_retl_o2_plus_o5_plus_8)
 	fsrc2	%f2, %f0
 	bgu,pt	%xcc, .Lunalign_by8
 	 add	%o0, 8, %o0

@@ -772,7 +772,7 @@ FUNC_NAME:
 	subcc	%o5, 0x20, %o5
 	EX_ST(STORE(stx, %o3, %o0 + 0x00), memcpy_retl_o2_plus_o5_plus_32)
 	EX_ST(STORE(stx, %g2, %o0 + 0x08), memcpy_retl_o2_plus_o5_plus_24)
-	EX_ST(STORE(stx, %g7, %o0 + 0x10), memcpy_retl_o2_plus_o5_plus_24)
+	EX_ST(STORE(stx, %g7, %o0 + 0x10), memcpy_retl_o2_plus_o5_plus_16)
 	EX_ST(STORE(stx, %o4, %o0 + 0x18), memcpy_retl_o2_plus_o5_plus_8)
 	bne,pt	%xcc, 1b
 	 add	%o0, 0x20, %o0

@@ -804,12 +804,12 @@ FUNC_NAME:
 	brz,pt	%o3, 2f
 	 sub	%o2, %o3, %o2

-1:	EX_LD(LOAD(ldub, %o1 + 0x00, %g2), memcpy_retl_o2_plus_g1)
+1:	EX_LD(LOAD(ldub, %o1 + 0x00, %g2), memcpy_retl_o2_plus_o3)
 	add	%o1, 1, %o1
 	subcc	%o3, 1, %o3
 	add	%o0, 1, %o0
 	bne,pt	%xcc, 1b
-	 EX_ST(STORE(stb, %g2, %o0 - 0x01), memcpy_retl_o2_plus_g1_plus_1)
+	 EX_ST(STORE(stb, %g2, %o0 - 0x01), memcpy_retl_o2_plus_o3_plus_1)
 2:
 	and	%o1, 0x7, %o3
 	brz,pn	%o3, .Lmedium_noprefetch_cp

@@ -137,6 +137,15 @@ ENTRY(memcpy_retl_o2_plus_63_8)
 	ba,pt	%xcc, __restore_asi
 	 add	%o2, 8, %o0
 ENDPROC(memcpy_retl_o2_plus_63_8)
+ENTRY(memcpy_retl_o2_plus_o3)
+	ba,pt	%xcc, __restore_asi
+	 add	%o2, %o3, %o0
+ENDPROC(memcpy_retl_o2_plus_o3)
+ENTRY(memcpy_retl_o2_plus_o3_plus_1)
+	add	%o3, 1, %o3
+	ba,pt	%xcc, __restore_asi
+	 add	%o2, %o3, %o0
+ENDPROC(memcpy_retl_o2_plus_o3_plus_1)
 ENTRY(memcpy_retl_o2_plus_o5)
 	ba,pt	%xcc, __restore_asi
 	 add	%o2, %o5, %o0

@@ -281,7 +281,7 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
 	subcc	%o5, 0x20, %o5
 	EX_ST(STORE(stx, %g1, %o0 + 0x00), memcpy_retl_o2_plus_o5_plus_32)
 	EX_ST(STORE(stx, %g2, %o0 + 0x08), memcpy_retl_o2_plus_o5_plus_24)
-	EX_ST(STORE(stx, GLOBAL_SPARE, %o0 + 0x10), memcpy_retl_o2_plus_o5_plus_24)
+	EX_ST(STORE(stx, GLOBAL_SPARE, %o0 + 0x10), memcpy_retl_o2_plus_o5_plus_16)
 	EX_ST(STORE(stx, %o4, %o0 + 0x18), memcpy_retl_o2_plus_o5_plus_8)
 	bne,pt	%icc, 1b
 	 add	%o0, 0x20, %o0

@@ -79,8 +79,8 @@
 #ifndef EX_RETVAL
 #define EX_RETVAL(x)	x
 __restore_asi:
-	ret
+	wr	%g0, ASI_AIUS, %asi
+	ret
 	 restore
 ENTRY(NG_ret_i2_plus_i4_plus_1)
 	ba,pt	%xcc, __restore_asi

@@ -125,15 +125,16 @@ ENTRY(NG_ret_i2_plus_g1_minus_56)
 	ba,pt	%xcc, __restore_asi
 	 add	%i2, %g1, %i0
 ENDPROC(NG_ret_i2_plus_g1_minus_56)
-ENTRY(NG_ret_i2_plus_i4)
+ENTRY(NG_ret_i2_plus_i4_plus_16)
+	add	%i4, 16, %i4
 	ba,pt	%xcc, __restore_asi
 	 add	%i2, %i4, %i0
-ENDPROC(NG_ret_i2_plus_i4)
-ENTRY(NG_ret_i2_plus_i4_minus_8)
-	sub	%i4, 8, %i4
+ENDPROC(NG_ret_i2_plus_i4_plus_16)
+ENTRY(NG_ret_i2_plus_i4_plus_8)
+	add	%i4, 8, %i4
 	ba,pt	%xcc, __restore_asi
 	 add	%i2, %i4, %i0
-ENDPROC(NG_ret_i2_plus_i4_minus_8)
+ENDPROC(NG_ret_i2_plus_i4_plus_8)
 ENTRY(NG_ret_i2_plus_8)
 	ba,pt	%xcc, __restore_asi
 	 add	%i2, 8, %i0

@@ -160,6 +161,12 @@ ENTRY(NG_ret_i2_and_7_plus_i4)
 	ba,pt	%xcc, __restore_asi
 	 add	%i2, %i4, %i0
 ENDPROC(NG_ret_i2_and_7_plus_i4)
+ENTRY(NG_ret_i2_and_7_plus_i4_plus_8)
+	and	%i2, 7, %i2
+	add	%i4, 8, %i4
+	ba,pt	%xcc, __restore_asi
+	 add	%i2, %i4, %i0
+ENDPROC(NG_ret_i2_and_7_plus_i4)
 #endif

 	.align	64

@@ -405,13 +412,13 @@ FUNC_NAME: /* %i0=dst, %i1=src, %i2=len */
 	andn	%i2, 0xf, %i4
 	and	%i2, 0xf, %i2
 1:	subcc	%i4, 0x10, %i4
-	EX_LD(LOAD(ldx, %i1, %o4), NG_ret_i2_plus_i4)
+	EX_LD(LOAD(ldx, %i1, %o4), NG_ret_i2_plus_i4_plus_16)
 	add	%i1, 0x08, %i1
-	EX_LD(LOAD(ldx, %i1, %g1), NG_ret_i2_plus_i4)
+	EX_LD(LOAD(ldx, %i1, %g1), NG_ret_i2_plus_i4_plus_16)
 	sub	%i1, 0x08, %i1
-	EX_ST(STORE(stx, %o4, %i1 + %i3), NG_ret_i2_plus_i4)
+	EX_ST(STORE(stx, %o4, %i1 + %i3), NG_ret_i2_plus_i4_plus_16)
 	add	%i1, 0x8, %i1
-	EX_ST(STORE(stx, %g1, %i1 + %i3), NG_ret_i2_plus_i4_minus_8)
+	EX_ST(STORE(stx, %g1, %i1 + %i3), NG_ret_i2_plus_i4_plus_8)
 	bgu,pt	%XCC, 1b
 	 add	%i1, 0x8, %i1
 73:	andcc	%i2, 0x8, %g0

@@ -468,7 +475,7 @@ FUNC_NAME: /* %i0=dst, %i1=src, %i2=len */
 	subcc	%i4, 0x8, %i4
 	srlx	%g3, %i3, %i5
 	or	%i5, %g2, %i5
-	EX_ST(STORE(stx, %i5, %o0), NG_ret_i2_and_7_plus_i4)
+	EX_ST(STORE(stx, %i5, %o0), NG_ret_i2_and_7_plus_i4_plus_8)
 	add	%o0, 0x8, %o0
 	bgu,pt	%icc, 1b
 	 sllx	%g3, %g1, %g2

@@ -164,17 +164,18 @@ ENTRY(U1_gs_40_fp)
 	retl
 	 add	%o0, %o2, %o0
 ENDPROC(U1_gs_40_fp)
-ENTRY(U1_g3_0_fp)
-	VISExitHalf
-	retl
-	 add	%g3, %o2, %o0
-ENDPROC(U1_g3_0_fp)
 ENTRY(U1_g3_8_fp)
 	VISExitHalf
 	add	%g3, 8, %g3
 	retl
 	 add	%g3, %o2, %o0
 ENDPROC(U1_g3_8_fp)
+ENTRY(U1_g3_16_fp)
+	VISExitHalf
+	add	%g3, 16, %g3
+	retl
+	 add	%g3, %o2, %o0
+ENDPROC(U1_g3_16_fp)
 ENTRY(U1_o2_0_fp)
 	VISExitHalf
 	retl

@@ -547,18 +548,18 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
 62:	FINISH_VISCHUNK(o0, f44, f46)
 63:	UNEVEN_VISCHUNK_LAST(o0, f46, f0)

-93:	EX_LD_FP(LOAD(ldd, %o1, %f2), U1_g3_0_fp)
+93:	EX_LD_FP(LOAD(ldd, %o1, %f2), U1_g3_8_fp)
 	add	%o1, 8, %o1
 	subcc	%g3, 8, %g3
 	faligndata %f0, %f2, %f8
-	EX_ST_FP(STORE(std, %f8, %o0), U1_g3_8_fp)
+	EX_ST_FP(STORE(std, %f8, %o0), U1_g3_16_fp)
 	bl,pn	%xcc, 95f
 	 add	%o0, 8, %o0
-	EX_LD_FP(LOAD(ldd, %o1, %f0), U1_g3_0_fp)
+	EX_LD_FP(LOAD(ldd, %o1, %f0), U1_g3_8_fp)
 	add	%o1, 8, %o1
 	subcc	%g3, 8, %g3
 	faligndata %f2, %f0, %f8
-	EX_ST_FP(STORE(std, %f8, %o0), U1_g3_8_fp)
+	EX_ST_FP(STORE(std, %f8, %o0), U1_g3_16_fp)
 	bge,pt	%xcc, 93b
 	 add	%o0, 8, %o0

@@ -267,6 +267,7 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
 	faligndata %f10, %f12, %f26
 	EX_LD_FP(LOAD(ldd, %o1 + 0x040, %f0), U3_retl_o2)

+	and	%o2, 0x3f, %o2
 	subcc	GLOBAL_SPARE, 0x80, GLOBAL_SPARE
 	add	%o1, 0x40, %o1
 	bgu,pt	%XCC, 1f

@@ -336,7 +337,6 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
 	 * Also notice how this code is careful not to perform a
 	 * load past the end of the src buffer.
 	 */
-	and	%o2, 0x3f, %o2
 	andcc	%o2, 0x38, %g2
 	be,pn	%XCC, 2f
 	 subcc	%g2, 0x8, %g2

@@ -244,7 +244,7 @@ static inline unsigned long vdso_encode_cpunode(int cpu, unsigned long node)

 static inline void vdso_read_cpunode(unsigned *cpu, unsigned *node)
 {
-	unsigned int p;
+	unsigned long p;

 	/*
	 * Load CPU and node number from the GDT.  LSL is faster than RDTSCP

@@ -254,10 +254,10 @@ static inline void vdso_read_cpunode(unsigned *cpu, unsigned *node)
	 *
	 * If RDPID is available, use it.
	 */
-	alternative_io ("lsl %[seg],%[p]",
-			".byte 0xf3,0x0f,0xc7,0xf8", /* RDPID %eax/rax */
+	alternative_io ("lsl %[seg],%k[p]",
+			"rdpid %[p]",
 			X86_FEATURE_RDPID,
-			[p] "=a" (p), [seg] "r" (__CPUNODE_SEG));
+			[p] "=r" (p), [seg] "r" (__CPUNODE_SEG));

 	if (cpu)
 		*cpu = (p & VDSO_CPUNODE_MASK);

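The type change matters because RDPID always writes the full 64-bit destination register, while LSL only defines the low 32 bits — hence the widened variable, the generic "=r" constraint, and the %k (32-bit) operand modifier on the LSL string. A hedged sketch of the RDPID side alone (no alternatives machinery; assumes a CPU with RDPID):

    /* Illustrative only: RDPID writes all 64 bits of its destination,
     * so the output variable must be register-sized. */
    static inline unsigned long read_cpunode_raw(void)
    {
        unsigned long p;

        asm volatile("rdpid %0" : "=r" (p));
        return p;   /* CPU and node packed as in vdso_encode_cpunode() */
    }
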
@@ -150,9 +150,11 @@ static void blk_mq_unregister_hctx(struct blk_mq_hw_ctx *hctx)
 		return;

 	hctx_for_each_ctx(hctx, ctx, i)
-		kobject_del(&ctx->kobj);
+		if (ctx->kobj.state_in_sysfs)
+			kobject_del(&ctx->kobj);

-	kobject_del(&hctx->kobj);
+	if (hctx->kobj.state_in_sysfs)
+		kobject_del(&hctx->kobj);
 }

 static int blk_mq_register_hctx(struct blk_mq_hw_ctx *hctx)

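kobject_del() is only valid on a kobject that is currently registered, and state_in_sysfs is the flag kobject_add()/kobject_del() maintain. A sketch of the guard pattern in isolation (kernel context assumed):

    #include <linux/kobject.h>

    /* Delete only if still registered, so teardown is safe to run on a
     * partially-registered set of kobjects. */
    static void kobject_del_if_registered(struct kobject *kobj)
    {
        if (kobj->state_in_sysfs)
            kobject_del(kobj);
    }
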
@@ -553,7 +553,8 @@ static unsigned int blk_round_down_sectors(unsigned int sectors, unsigned int lbs)
 int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
 		sector_t start)
 {
-	unsigned int top, bottom, alignment, ret = 0;
+	unsigned int top, bottom, alignment;
+	int ret = 0;

 	t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
 	t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);

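The split matters because the function stores negative error values in ret; in an unsigned variable those wrap, and any sign test silently becomes dead code. A two-line demonstration:

    #include <stdio.h>

    int main(void)
    {
        unsigned int uret = -1;   /* wraps to UINT_MAX */
        int ret = -1;

        /* prints "0 1": a sign check on an unsigned accumulator is dead code */
        printf("%d %d\n", uret < 0, ret < 0);
        return 0;
    }
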
@@ -618,11 +618,14 @@ int x509_process_extension(void *context, size_t hdrlen,
 	/*
	 * Get hold of the basicConstraints
	 * v[1] is the encoding size
-	 *	(Expect 0x2 or greater, making it 1 or more bytes)
+	 *	(Expect 0x00 for empty SEQUENCE with CA:FALSE, or
+	 *	 0x03 or greater for non-empty SEQUENCE)
	 * v[2] is the encoding type
	 *	(Expect an ASN1_BOOL for the CA)
-	 * v[3] is the contents of the ASN1_BOOL
-	 *	(Expect 1 if the CA is TRUE)
+	 * v[3] is the length of the ASN1_BOOL
+	 *	(Expect 1 for a single byte boolean)
+	 * v[4] is the contents of the ASN1_BOOL
+	 *	(Expect 0xFF if the CA is TRUE)
	 * vlen should match the entire extension size
	 */
 	if (v[0] != (ASN1_CONS_BIT | ASN1_SEQ))

@@ -631,8 +634,13 @@ int x509_process_extension(void *context, size_t hdrlen,
 			return -EBADMSG;
 		if (v[1] != vlen - 2)
 			return -EBADMSG;
-		if (vlen >= 4 && v[1] != 0 && v[2] == ASN1_BOOL && v[3] == 1)
+		/* Empty SEQUENCE means CA:FALSE (default value omitted per DER) */
+		if (v[1] == 0)
+			return 0;
+		if (vlen >= 5 && v[2] == ASN1_BOOL && v[3] == 1 && v[4] == 0xFF)
 			ctx->cert->pub->key_eflags |= 1 << KEY_EFLAG_CA;
+		else
+			return -EBADMSG;
 		return 0;
 	}

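For reference, the two DER encodings the new checks distinguish (standard ASN.1 DER, not quoted from the kernel source): basicConstraints is SEQUENCE { cA BOOLEAN DEFAULT FALSE, ... }, and DER requires values equal to their DEFAULT to be omitted, so CA:FALSE appears as an empty SEQUENCE:

    /* Index:                                   v[0]  v[1]  v[2]  v[3]  v[4] */
    static const unsigned char bc_ca_false[] = { 0x30, 0x00 };          /* SEQUENCE {}       */
    static const unsigned char bc_ca_true[]  = { 0x30, 0x03,            /* SEQUENCE, len 3   */
                                                 0x01, 0x01, 0xFF };    /* BOOLEAN, len 1, TRUE */
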
@@ -1139,7 +1139,7 @@ struct acpi_port_info {
 #define ACPI_RESOURCE_NAME_PIN_GROUP_FUNCTION   0x91
 #define ACPI_RESOURCE_NAME_PIN_GROUP_CONFIG     0x92
 #define ACPI_RESOURCE_NAME_CLOCK_INPUT          0x93
-#define ACPI_RESOURCE_NAME_LARGE_MAX            0x94
+#define ACPI_RESOURCE_NAME_LARGE_MAX            0x93

 /*****************************************************************************
 *

@@ -2643,7 +2643,7 @@ static int acpi_nfit_register_region(struct acpi_nfit_desc *acpi_desc,
 	if (ndr_desc->target_node == NUMA_NO_NODE) {
 		ndr_desc->target_node = phys_to_target_node(spa->address);
 		dev_info(acpi_desc->dev, "changing target node from %d to %d for nfit region [%pa-%pa]",
-			NUMA_NO_NODE, ndr_desc->numa_node, &res.start, &res.end);
+			NUMA_NO_NODE, ndr_desc->target_node, &res.start, &res.end);
 	}

 	/*

@@ -1410,6 +1410,9 @@ int acpi_processor_power_init(struct acpi_processor *pr)
 	if (retval) {
 		if (acpi_processor_registered == 0)
 			cpuidle_unregister_driver(&acpi_idle_driver);
+
+		per_cpu(acpi_cpuidle_device, pr->id) = NULL;
+		kfree(dev);
 		return retval;
 	}
 	acpi_processor_registered++;

@@ -878,6 +878,10 @@ int __register_one_node(int nid)
 	node_devices[nid] = node;

 	error = register_node(node_devices[nid], nid);
+	if (error) {
+		node_devices[nid] = NULL;
+		return error;
+	}

 	/* link cpu under this node */
 	for_each_present_cpu(cpu) {

@@ -600,8 +600,20 @@ static void __device_resume_noirq(struct device *dev, pm_message_t state, bool async)
 	if (dev->power.syscore || dev->power.direct_complete)
 		goto Out;

-	if (!dev->power.is_noirq_suspended)
+	if (!dev->power.is_noirq_suspended) {
+		/*
+		 * This means that system suspend has been aborted in the noirq
+		 * phase before invoking the noirq suspend callback for the
+		 * device, so if device_suspend_late() has left it in suspend,
+		 * device_resume_early() should leave it in suspend either in
+		 * case the early resume of it depends on the noirq resume that
+		 * has not run.
+		 */
+		if (dev_pm_skip_suspend(dev))
+			dev->power.must_resume = false;
+
 		goto Out;
+	}

 	if (!dpm_wait_for_superior(dev, async))
 		goto Out;

@@ -827,7 +827,7 @@ struct regmap *__regmap_init(struct device *dev,
 		map->read_flag_mask = bus->read_flag_mask;
 	}

-	if (config && config->read && config->write) {
+	if (config->read && config->write) {
 		map->reg_read = _regmap_bus_read;
 		if (config->reg_update_bits)
 			map->reg_update_bits = config->reg_update_bits;

@@ -1128,6 +1128,14 @@ static struct socket *nbd_get_socket(struct nbd_device *nbd, unsigned long fd,
 	if (!sock)
 		return NULL;

+	if (!sk_is_tcp(sock->sk) &&
+	    !sk_is_stream_unix(sock->sk)) {
+		dev_err(disk_to_dev(nbd->disk), "Unsupported socket: should be TCP or UNIX.\n");
+		*err = -EINVAL;
+		sockfd_put(sock);
+		return NULL;
+	}
+
 	if (sock->ops->shutdown == sock_no_shutdown) {
 		dev_err(disk_to_dev(nbd->disk), "Unsupported socket: shutdown callout must be supported.\n");
 		*err = -EINVAL;

@@ -211,7 +211,7 @@ MODULE_PARM_DESC(discard, "Support discard operations (requires memory-backed null_blk device)");

 static unsigned long g_cache_size;
 module_param_named(cache_size, g_cache_size, ulong, 0444);
-MODULE_PARM_DESC(mbps, "Cache size in MiB for memory-backed device. Default: 0 (none)");
+MODULE_PARM_DESC(cache_size, "Cache size in MiB for memory-backed device. Default: 0 (none)");

 static unsigned int g_mbps;
 module_param_named(mbps, g_mbps, uint, 0444);

@@ -1103,6 +1103,9 @@ static int fsl_mc_bus_probe(struct platform_device *pdev)
	 * Get physical address of MC portal for the root DPRC:
	 */
 	plat_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!plat_res)
+		return -EINVAL;
+
 	mc_portal_phys_addr = plat_res->start;
 	mc_portal_size = resource_size(plat_res);
 	mc_portal_base_phys_addr = mc_portal_phys_addr & ~0x3ffffff;

@@ -286,6 +286,7 @@ config HW_RANDOM_INGENIC_TRNG
 config HW_RANDOM_NOMADIK
 	tristate "ST-Ericsson Nomadik Random Number Generator support"
 	depends on ARCH_NOMADIK || COMPILE_TEST
+	depends on ARM_AMBA
 	default HW_RANDOM
 	help
 	  This driver provides kernel-side support for the Random Number

@@ -240,6 +240,10 @@ static int ks_sa_rng_probe(struct platform_device *pdev)
 		return -EINVAL;
 	}

+	ks_sa_rng->clk = devm_clk_get_enabled(dev, NULL);
+	if (IS_ERR(ks_sa_rng->clk))
+		return dev_err_probe(dev, PTR_ERR(ks_sa_rng->clk), "Failed to get clock\n");
+
 	pm_runtime_enable(dev);
 	ret = pm_runtime_resume_and_get(dev);
 	if (ret < 0) {

@@ -15,6 +15,7 @@
 #include <linux/energy_model.h>
 #include <linux/export.h>
 #include <linux/module.h>
+#include <linux/of.h>
 #include <linux/pm_opp.h>
 #include <linux/slab.h>
 #include <linux/scmi_protocol.h>

@@ -330,6 +331,15 @@ static bool scmi_dev_used_by_cpus(struct device *scmi_dev)
 		return true;
 	}

+	/*
+	 * Older Broadcom STB chips had a "clocks" property for CPU node(s)
+	 * that did not match the SCMI performance protocol node, if we got
+	 * there, it means we had such an older Device Tree, therefore return
+	 * true to preserve backwards compatibility.
+	 */
+	if (of_machine_is_compatible("brcm,brcmstb"))
+		return true;
+
 	return false;
 }

@@ -96,20 +96,23 @@ static int spm_cpuidle_register(struct device *cpuidle_dev, int cpu)
 		return -ENODEV;

 	saw_node = of_parse_phandle(cpu_node, "qcom,saw", 0);
+	of_node_put(cpu_node);
 	if (!saw_node)
 		return -ENODEV;

 	pdev = of_find_device_by_node(saw_node);
 	of_node_put(saw_node);
-	of_node_put(cpu_node);
 	if (!pdev)
 		return -ENODEV;

 	data = devm_kzalloc(cpuidle_dev, sizeof(*data), GFP_KERNEL);
-	if (!data)
+	if (!data) {
+		put_device(&pdev->dev);
 		return -ENOMEM;
+	}

 	data->spm = dev_get_drvdata(&pdev->dev);
+	put_device(&pdev->dev);
 	if (!data->spm)
 		return -EINVAL;

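The reshuffled puts follow the usual OF reference rules: of_parse_phandle() and of_find_device_by_node() both return counted references, so each must be dropped as soon as its pointer is no longer needed, including on allocation failure. A condensed sketch of the pattern (hypothetical helper, kernel context):

    #include <linux/of.h>
    #include <linux/of_platform.h>

    /* Caller passes ownership of cpu_node; caller must put_device()
     * the returned platform device when done with it. */
    static struct platform_device *saw_pdev_get(struct device_node *cpu_node)
    {
        struct device_node *saw_node;
        struct platform_device *pdev;

        saw_node = of_parse_phandle(cpu_node, "qcom,saw", 0);
        of_node_put(cpu_node);          /* cpu_node no longer needed */
        if (!saw_node)
            return NULL;

        pdev = of_find_device_by_node(saw_node);
        of_node_put(saw_node);          /* saw_node no longer needed */
        return pdev;
    }
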
@@ -865,6 +865,7 @@ static int qm_diff_regs_init(struct hisi_qm *qm,
 		dfx_regs_uninit(qm, qm->debug.qm_diff_regs, ARRAY_SIZE(qm_diff_regs));
 		ret = PTR_ERR(qm->debug.acc_diff_regs);
 		qm->debug.acc_diff_regs = NULL;
+		qm->debug.qm_diff_regs = NULL;
 		return ret;
 	}

@@ -689,6 +689,7 @@ static int hpre_set_user_domain_and_cache(struct hisi_qm *qm)

 	/* Config data buffer pasid needed by Kunpeng 920 */
 	hpre_config_pasid(qm);
+	hpre_open_sva_prefetch(qm);

 	hpre_enable_clock_gate(qm);

@@ -1366,8 +1367,6 @@ static int hpre_pf_probe_init(struct hpre *hpre)
 	if (ret)
 		return ret;

-	hpre_open_sva_prefetch(qm);
-
 	hisi_qm_dev_err_init(qm);
 	ret = hpre_show_last_regs_init(qm);
 	if (ret)

@@ -3748,6 +3748,10 @@ static ssize_t qm_get_qos_value(struct hisi_qm *qm, const char *buf,
 	}

 	pdev = container_of(dev, struct pci_dev, dev);
+	if (pci_physfn(pdev) != qm->pdev) {
+		pci_err(qm->pdev, "the pdev input does not match the pf!\n");
+		return -EINVAL;
+	}

 	*fun_index = pdev->devfn;

@@ -4363,9 +4367,6 @@ static void qm_restart_prepare(struct hisi_qm *qm)
 {
 	u32 value;

-	if (qm->err_ini->open_sva_prefetch)
-		qm->err_ini->open_sva_prefetch(qm);
-
 	if (qm->ver >= QM_HW_V3)
 		return;

@@ -436,6 +436,45 @@ static void sec_set_endian(struct hisi_qm *qm)
 	writel_relaxed(reg, qm->io_base + SEC_CONTROL_REG);
 }

+static void sec_close_sva_prefetch(struct hisi_qm *qm)
+{
+	u32 val;
+	int ret;
+
+	if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps))
+		return;
+
+	val = readl_relaxed(qm->io_base + SEC_PREFETCH_CFG);
+	val |= SEC_PREFETCH_DISABLE;
+	writel(val, qm->io_base + SEC_PREFETCH_CFG);
+
+	ret = readl_relaxed_poll_timeout(qm->io_base + SEC_SVA_TRANS,
+					 val, !(val & SEC_SVA_DISABLE_READY),
+					 SEC_DELAY_10_US, SEC_POLL_TIMEOUT_US);
+	if (ret)
+		pci_err(qm->pdev, "failed to close sva prefetch\n");
+}
+
+static void sec_open_sva_prefetch(struct hisi_qm *qm)
+{
+	u32 val;
+	int ret;
+
+	if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps))
+		return;
+
+	/* Enable prefetch */
+	val = readl_relaxed(qm->io_base + SEC_PREFETCH_CFG);
+	val &= SEC_PREFETCH_ENABLE;
+	writel(val, qm->io_base + SEC_PREFETCH_CFG);
+
+	ret = readl_relaxed_poll_timeout(qm->io_base + SEC_PREFETCH_CFG,
+					 val, !(val & SEC_PREFETCH_DISABLE),
+					 SEC_DELAY_10_US, SEC_POLL_TIMEOUT_US);
+	if (ret)
+		pci_err(qm->pdev, "failed to open sva prefetch\n");
+}
+
 static void sec_engine_sva_config(struct hisi_qm *qm)
 {
 	u32 reg;

@@ -469,45 +508,7 @@ static void sec_engine_sva_config(struct hisi_qm *qm)
 		writel_relaxed(reg, qm->io_base +
 			       SEC_INTERFACE_USER_CTRL1_REG);
 	}
-}
-
-static void sec_open_sva_prefetch(struct hisi_qm *qm)
-{
-	u32 val;
-	int ret;
-
-	if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps))
-		return;
-
-	/* Enable prefetch */
-	val = readl_relaxed(qm->io_base + SEC_PREFETCH_CFG);
-	val &= SEC_PREFETCH_ENABLE;
-	writel(val, qm->io_base + SEC_PREFETCH_CFG);
-
-	ret = readl_relaxed_poll_timeout(qm->io_base + SEC_PREFETCH_CFG,
-					 val, !(val & SEC_PREFETCH_DISABLE),
-					 SEC_DELAY_10_US, SEC_POLL_TIMEOUT_US);
-	if (ret)
-		pci_err(qm->pdev, "failed to open sva prefetch\n");
-}
-
-static void sec_close_sva_prefetch(struct hisi_qm *qm)
-{
-	u32 val;
-	int ret;
-
-	if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps))
-		return;
-
-	val = readl_relaxed(qm->io_base + SEC_PREFETCH_CFG);
-	val |= SEC_PREFETCH_DISABLE;
-	writel(val, qm->io_base + SEC_PREFETCH_CFG);
-
-	ret = readl_relaxed_poll_timeout(qm->io_base + SEC_SVA_TRANS,
-					 val, !(val & SEC_SVA_DISABLE_READY),
-					 SEC_DELAY_10_US, SEC_POLL_TIMEOUT_US);
-	if (ret)
-		pci_err(qm->pdev, "failed to close sva prefetch\n");
+
+	sec_open_sva_prefetch(qm);
 }

 static void sec_enable_clock_gate(struct hisi_qm *qm)

@@ -1090,7 +1091,6 @@ static int sec_pf_probe_init(struct sec_dev *sec)
 	if (ret)
 		return ret;

-	sec_open_sva_prefetch(qm);
 	hisi_qm_dev_err_init(qm);
 	sec_debug_regs_clear(qm);
 	ret = sec_show_last_regs_init(qm);

@@ -469,10 +469,9 @@ bool hisi_zip_alg_support(struct hisi_qm *qm, u32 alg)
 	return false;
 }

-static int hisi_zip_set_high_perf(struct hisi_qm *qm)
+static void hisi_zip_set_high_perf(struct hisi_qm *qm)
 {
 	u32 val;
-	int ret;

 	val = readl_relaxed(qm->io_base + HZIP_HIGH_PERF_OFFSET);
 	if (perf_mode == HZIP_HIGH_COMP_PERF)

@@ -482,13 +481,6 @@ static int hisi_zip_set_high_perf(struct hisi_qm *qm)

 	/* Set perf mode */
 	writel(val, qm->io_base + HZIP_HIGH_PERF_OFFSET);
-	ret = readl_relaxed_poll_timeout(qm->io_base + HZIP_HIGH_PERF_OFFSET,
-					 val, val == perf_mode, HZIP_DELAY_1_US,
-					 HZIP_POLL_TIMEOUT_US);
-	if (ret)
-		pci_err(qm->pdev, "failed to set perf mode\n");
-
-	return ret;
 }

 static void hisi_zip_open_sva_prefetch(struct hisi_qm *qm)

@@ -585,6 +577,7 @@ static int hisi_zip_set_user_domain_and_cache(struct hisi_qm *qm)
 		writel(AXUSER_BASE, base + HZIP_DATA_WUSER_32_63);
 		writel(AXUSER_BASE, base + HZIP_SGL_RUSER_32_63);
 	}
+	hisi_zip_open_sva_prefetch(qm);

 	/* let's open all compression/decompression cores */
 	dcomp_bm = qm->cap_tables.dev_cap_table[ZIP_DECOMP_ENABLE_BITMAP_IDX].cap_val;

@@ -596,6 +589,7 @@ static int hisi_zip_set_user_domain_and_cache(struct hisi_qm *qm)
 	       CQC_CACHE_WB_ENABLE | FIELD_PREP(SQC_CACHE_WB_THRD, 1) |
 	       FIELD_PREP(CQC_CACHE_WB_THRD, 1), base + QM_CACHE_CTL);

+	hisi_zip_set_high_perf(qm);
 	hisi_zip_enable_clock_gate(qm);

 	return 0;

@@ -1180,11 +1174,6 @@ static int hisi_zip_pf_probe_init(struct hisi_zip *hisi_zip)
 	if (ret)
 		return ret;

-	ret = hisi_zip_set_high_perf(qm);
-	if (ret)
-		return ret;
-
-	hisi_zip_open_sva_prefetch(qm);
 	hisi_qm_dev_err_init(qm);
 	hisi_zip_debug_regs_clear(qm);

@@ -232,7 +232,7 @@ static int kmb_ocs_dma_prepare(struct ahash_request *req)
 	struct device *dev = rctx->hcu_dev->dev;
 	unsigned int remainder = 0;
 	unsigned int total;
-	size_t nents;
+	int nents;
 	size_t count;
 	int rc;
 	int i;

@@ -253,6 +253,9 @@ static int kmb_ocs_dma_prepare(struct ahash_request *req)
 	/* Determine the number of scatter gather list entries to process. */
 	nents = sg_nents_for_len(req->src, rctx->sg_data_total - remainder);

+	if (nents < 0)
+		return nents;
+
 	/* If there are entries to process, map them. */
 	if (nents) {
 		rctx->sg_dma_nents = dma_map_sg(dev, req->src, nents,

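sg_nents_for_len() returns a negative errno when the scatterlist is shorter than the requested length; stored in a size_t, that error becomes a huge positive entry count and the new `nents < 0` check could never fire. A small demonstration of the trap:

    #include <stddef.h>
    #include <stdio.h>

    int main(void)
    {
        int nents = -22;          /* e.g. -EINVAL from sg_nents_for_len() */
        size_t as_size_t = nents; /* wraps to SIZE_MAX - 21 */

        printf("int: %d  size_t: %zu\n", nents, as_size_t);
        /* (as_size_t < 0) is always false, so an error check on it is dead code */
        return 0;
    }
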
@@ -385,7 +385,8 @@ out_disable_cci_clk:
 out_free_resources:
 	if (regulator_is_enabled(drv->proc_reg))
 		regulator_disable(drv->proc_reg);
-	if (drv->sram_reg && regulator_is_enabled(drv->sram_reg))
+	if (!IS_ERR_OR_NULL(drv->sram_reg) &&
+	    regulator_is_enabled(drv->sram_reg))
 		regulator_disable(drv->sram_reg);

 	return ret;

@@ -970,6 +970,15 @@ static bool i10nm_check_ecc(struct skx_imc *imc, int chan)
 	return !!GET_BITFIELD(mcmtr, 2, 2);
 }

+static bool i10nm_channel_disabled(struct skx_imc *imc, int chan)
+{
+	u32 mcmtr = I10NM_GET_MCMTR(imc, chan);
+
+	edac_dbg(1, "mc%d ch%d mcmtr reg %x\n", imc->mc, chan, mcmtr);
+
+	return (mcmtr == ~0 || GET_BITFIELD(mcmtr, 18, 18));
+}
+
 static int i10nm_get_dimm_config(struct mem_ctl_info *mci,
				 struct res_config *cfg)
 {

@@ -983,6 +992,11 @@ static int i10nm_get_dimm_config(struct mem_ctl_info *mci,
 		if (!imc->mbase)
 			continue;

+		if (i10nm_channel_disabled(imc, i)) {
+			edac_dbg(1, "mc%d ch%d is disabled.\n", imc->mc, i);
+			continue;
+		}
+
 		ndimms = 0;
 		amap = I10NM_GET_AMAP(imc, i);

@@ -5,7 +5,7 @@
 config MESON_SM
 	tristate "Amlogic Secure Monitor driver"
 	depends on ARCH_MESON || COMPILE_TEST
-	default y
+	default ARCH_MESON
 	depends on ARM64_4K_PAGES
 	help
 	  Say y here to enable the Amlogic secure monitor driver

@@ -625,7 +625,22 @@ static void uvd_v3_1_enable_mgcg(struct amdgpu_device *adev,
 *
 * @handle: handle used to pass amdgpu_device pointer
 *
- * Initialize the hardware, boot up the VCPU and do some testing
+ * Initialize the hardware, boot up the VCPU and do some testing.
+ *
+ * On SI, the UVD is meant to be used in a specific power state,
+ * or alternatively the driver can manually enable its clock.
+ * In amdgpu we use the dedicated UVD power state when DPM is enabled.
+ * Calling amdgpu_dpm_enable_uvd makes DPM select the UVD power state
+ * for the SMU and afterwards enables the UVD clock.
+ * This is automatically done by amdgpu_uvd_ring_begin_use when work
+ * is submitted to the UVD ring. Here, we have to call it manually
+ * in order to power up UVD before firmware validation.
+ *
+ * Note that we must not disable the UVD clock here, as that would
+ * cause the ring test to fail. However, UVD is powered off
+ * automatically after the ring test: amdgpu_uvd_ring_end_use calls
+ * the UVD idle work handler which will disable the UVD clock when
+ * all fences are signalled.
 */
 static int uvd_v3_1_hw_init(void *handle)
 {

@@ -635,6 +650,15 @@ static int uvd_v3_1_hw_init(void *handle)
 	int r;

 	uvd_v3_1_mc_resume(adev);
+	uvd_v3_1_enable_mgcg(adev, true);
+
+	/* Make sure UVD is powered during FW validation.
	 * It's going to be automatically powered off after the ring test.
	 */
+	if (adev->pm.dpm_enabled)
+		amdgpu_dpm_enable_uvd(adev, true);
+	else
+		amdgpu_asic_set_uvd_clocks(adev, 53300, 40000);

 	r = uvd_v3_1_fw_validate(adev);
 	if (r) {

@@ -642,9 +666,6 @@ static int uvd_v3_1_hw_init(void *handle)
 		return r;
 	}

-	uvd_v3_1_enable_mgcg(adev, true);
-	amdgpu_asic_set_uvd_clocks(adev, 53300, 40000);
-
 	uvd_v3_1_start(adev);

 	r = amdgpu_ring_test_helper(ring);

@@ -4142,7 +4142,7 @@ svm_ioctl(struct kfd_process *p, enum kfd_ioctl_svm_op op, uint64_t start,
 		r = svm_range_get_attr(p, mm, start, size, nattrs, attrs);
 		break;
 	default:
-		r = EINVAL;
+		r = -EINVAL;
 		break;
 	}

@@ -139,7 +139,6 @@ void dml32_rq_dlg_get_rq_reg(display_rq_regs_st *rq_regs,
 	if (dual_plane) {
 		unsigned int p1_pte_row_height_linear = get_dpte_row_height_linear_c(mode_lib, e2e_pipe_param,
				num_pipes, pipe_idx);
-		;
 		if (src->sw_mode == dm_sw_linear)
 			ASSERT(p1_pte_row_height_linear >= 8);

@@ -66,6 +66,13 @@ u32 amdgpu_dpm_get_vblank_time(struct amdgpu_device *adev)
				(amdgpu_crtc->v_border * 2));

 			vblank_time_us = vblank_in_pixels * 1000 / amdgpu_crtc->hw_mode.clock;
+
+			/* we have issues with mclk switching with
+			 * refresh rates over 120 hz on the non-DC code.
+			 */
+			if (drm_mode_vrefresh(&amdgpu_crtc->hw_mode) > 120)
+				vblank_time_us = 0;
+
 			break;
 		}
 	}

@@ -3066,7 +3066,13 @@ static bool si_dpm_vblank_too_short(void *handle)
 	/* we never hit the non-gddr5 limit so disable it */
 	u32 switch_limit = adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5 ? 450 : 0;

-	if (vblank_time < switch_limit)
+	/* Consider zero vblank time too short and disable MCLK switching.
+	 * Note that the vblank time is set to maximum when no displays are attached,
+	 * so we'll still enable MCLK switching in that case.
+	 */
+	if (vblank_time == 0)
+		return true;
+	else if (vblank_time < switch_limit)
 		return true;
 	else
 		return false;

@@ -3424,12 +3430,14 @@ static void si_apply_state_adjust_rules(struct amdgpu_device *adev,
 {
 	struct si_ps *ps = si_get_ps(rps);
 	struct amdgpu_clock_and_voltage_limits *max_limits;
+	struct amdgpu_connector *conn;
 	bool disable_mclk_switching = false;
 	bool disable_sclk_switching = false;
 	u32 mclk, sclk;
 	u16 vddc, vddci, min_vce_voltage = 0;
 	u32 max_sclk_vddc, max_mclk_vddci, max_mclk_vddc;
 	u32 max_sclk = 0, max_mclk = 0;
+	u32 high_pixelclock_count = 0;
 	int i;

 	if (adev->asic_type == CHIP_HAINAN) {

@@ -3457,6 +3465,35 @@ static void si_apply_state_adjust_rules(struct amdgpu_device *adev,
 		}
 	}

+	/* We define "high pixelclock" for SI as higher than necessary for 4K 30Hz.
+	 * For example, 4K 60Hz and 1080p 144Hz fall into this category.
+	 * Find number of such displays connected.
+	 */
+	for (i = 0; i < adev->mode_info.num_crtc; i++) {
+		if (!(adev->pm.dpm.new_active_crtcs & (1 << i)) ||
+		    !adev->mode_info.crtcs[i]->enabled)
+			continue;
+
+		conn = to_amdgpu_connector(adev->mode_info.crtcs[i]->connector);
+
+		if (conn->pixelclock_for_modeset > 297000)
+			high_pixelclock_count++;
+	}
+
+	/* These are some ad-hoc fixes to some issues observed with SI GPUs.
+	 * They are necessary because we don't have something like dce_calcs
+	 * for these GPUs to calculate bandwidth requirements.
+	 */
+	if (high_pixelclock_count) {
+		/* On Oland, we observe some flickering when two 4K 60Hz
+		 * displays are connected, possibly because voltage is too low.
+		 * Raise the voltage by requiring a higher SCLK.
+		 * (Voltage cannot be adjusted independently without also SCLK.)
+		 */
+		if (high_pixelclock_count > 1 && adev->asic_type == CHIP_OLAND)
+			disable_sclk_switching = true;
+	}
+
 	if (rps->vce_active) {
 		rps->evclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].evclk;
 		rps->ecclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].ecclk;

@@ -5617,14 +5654,10 @@ static int si_populate_smc_t(struct amdgpu_device *adev,

 static int si_disable_ulv(struct amdgpu_device *adev)
 {
-	struct si_power_info *si_pi = si_get_pi(adev);
-	struct si_ulv_param *ulv = &si_pi->ulv;
+	PPSMC_Result r;

-	if (ulv->supported)
-		return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_DisableULV) == PPSMC_Result_OK) ?
-			0 : -EINVAL;
-
-	return 0;
+	r = amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_DisableULV);
+	return (r == PPSMC_Result_OK) ? 0 : -EINVAL;
 }

 static bool si_is_state_ulv_compatible(struct amdgpu_device *adev,

@@ -5797,9 +5830,9 @@ static int si_upload_smc_data(struct amdgpu_device *adev)
 {
 	struct amdgpu_crtc *amdgpu_crtc = NULL;
 	int i;
-
-	if (adev->pm.dpm.new_active_crtc_count == 0)
-		return 0;
+	u32 crtc_index = 0;
+	u32 mclk_change_block_cp_min = 0;
+	u32 mclk_change_block_cp_max = 0;

 	for (i = 0; i < adev->mode_info.num_crtc; i++) {
 		if (adev->pm.dpm.new_active_crtcs & (1 << i)) {

@@ -5808,26 +5841,31 @@ static int si_upload_smc_data(struct amdgpu_device *adev)
 		}
 	}

-	if (amdgpu_crtc == NULL)
-		return 0;
+	/* When a display is plugged in, program these so that the SMC
+	 * performs MCLK switching when it doesn't cause flickering.
+	 * When no display is plugged in, there is no need to restrict
+	 * MCLK switching, so program them to zero.
+	 */
+	if (adev->pm.dpm.new_active_crtc_count && amdgpu_crtc) {
+		crtc_index = amdgpu_crtc->crtc_id;

-	if (amdgpu_crtc->line_time <= 0)
-		return 0;
+		if (amdgpu_crtc->line_time) {
+			mclk_change_block_cp_min = 200 / amdgpu_crtc->line_time;
+			mclk_change_block_cp_max = 100 / amdgpu_crtc->line_time;
+		}
+	}

-	if (si_write_smc_soft_register(adev,
-				       SI_SMC_SOFT_REGISTER_crtc_index,
-				       amdgpu_crtc->crtc_id) != PPSMC_Result_OK)
-		return 0;
+	si_write_smc_soft_register(adev,
+				   SI_SMC_SOFT_REGISTER_crtc_index,
+				   crtc_index);

-	if (si_write_smc_soft_register(adev,
-				       SI_SMC_SOFT_REGISTER_mclk_change_block_cp_min,
-				       amdgpu_crtc->wm_high / amdgpu_crtc->line_time) != PPSMC_Result_OK)
-		return 0;
+	si_write_smc_soft_register(adev,
+				   SI_SMC_SOFT_REGISTER_mclk_change_block_cp_min,
+				   mclk_change_block_cp_min);

-	if (si_write_smc_soft_register(adev,
-				       SI_SMC_SOFT_REGISTER_mclk_change_block_cp_max,
-				       amdgpu_crtc->wm_low / amdgpu_crtc->line_time) != PPSMC_Result_OK)
-		return 0;
+	si_write_smc_soft_register(adev,
+				   SI_SMC_SOFT_REGISTER_mclk_change_block_cp_max,
+				   mclk_change_block_cp_max);

 	return 0;
 }

@@ -85,6 +85,7 @@ config DRM_ITE_IT6505
 	select EXTCON
 	select CRYPTO
 	select CRYPTO_HASH
+	select REGMAP_I2C
 	help
 	  ITE IT6505 DisplayPort bridge chip driver.

@@ -433,7 +433,7 @@ static void _dpu_encoder_phys_wb_handle_wbdone_timeout(
 static int dpu_encoder_phys_wb_wait_for_commit_done(
 		struct dpu_encoder_phys *phys_enc)
 {
-	unsigned long ret;
+	int ret;
 	struct dpu_encoder_wait_info wait_info;
 	struct dpu_encoder_phys_wb *wb_enc = to_dpu_encoder_phys_wb(phys_enc);

@@ -161,7 +161,7 @@ static int nt35560_set_brightness(struct backlight_device *bl)
 		par = 0x00;
 		ret = mipi_dsi_dcs_write(dsi, MIPI_DCS_WRITE_CONTROL_DISPLAY,
					 &par, 1);
-		if (ret) {
+		if (ret < 0) {
 			dev_err(nt->dev, "failed to disable display backlight (%d)\n", ret);
 			return ret;
 		}

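mipi_dsi_dcs_write() returns a ssize_t: the number of payload bytes sent on success, or a negative errno on failure. With `if (ret)`, every successful one-byte write took the error path. The corrected call shape, in isolation (kernel context):

    /* Only negative returns are errors; positive means bytes transferred. */
    ssize_t ret = mipi_dsi_dcs_write(dsi, MIPI_DCS_WRITE_CONTROL_DISPLAY,
                                     &par, 1);
    if (ret < 0)
        return ret;
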
@@ -1408,7 +1408,7 @@ static void r600_texture_size(unsigned nfaces, unsigned blevel, unsigned llevel,
				unsigned block_align, unsigned height_align, unsigned base_align,
				unsigned *l0_size, unsigned *mipmap_size)
 {
-	unsigned offset, i, level;
+	unsigned offset, i;
 	unsigned width, height, depth, size;
 	unsigned blocksize;
 	unsigned nbx, nby;

@@ -1420,7 +1420,7 @@ static void r600_texture_size(unsigned nfaces, unsigned blevel, unsigned llevel,
 	w0 = r600_mip_minify(w0, 0);
 	h0 = r600_mip_minify(h0, 0);
 	d0 = r600_mip_minify(d0, 0);
-	for(i = 0, offset = 0, level = blevel; i < nlevels; i++, level++) {
+	for (i = 0, offset = 0; i < nlevels; i++) {
 		width = r600_mip_minify(w0, i);
 		nbx = r600_fmt_get_nblocksx(format, width);

@@ -113,8 +113,8 @@ struct mlxreg_fan {
 	int divider;
 };

-static int mlxreg_fan_set_cur_state(struct thermal_cooling_device *cdev,
-				    unsigned long state);
+static int _mlxreg_fan_set_cur_state(struct thermal_cooling_device *cdev,
+				     unsigned long state, bool thermal);

 static int
 mlxreg_fan_read(struct device *dev, enum hwmon_sensor_types type, u32 attr,

@@ -224,8 +224,9 @@ mlxreg_fan_write(struct device *dev, enum hwmon_sensor_types type, u32 attr,
			 * last thermal state.
			 */
 			if (pwm->last_hwmon_state >= pwm->last_thermal_state)
-				return mlxreg_fan_set_cur_state(pwm->cdev,
-						pwm->last_hwmon_state);
+				return _mlxreg_fan_set_cur_state(pwm->cdev,
+						pwm->last_hwmon_state,
+						false);
 			return 0;
 		}
 		return regmap_write(fan->regmap, pwm->reg, val);

@@ -357,9 +358,8 @@ static int mlxreg_fan_get_cur_state(struct thermal_cooling_device *cdev,
 	return 0;
 }

-static int mlxreg_fan_set_cur_state(struct thermal_cooling_device *cdev,
-				    unsigned long state)
-
+static int _mlxreg_fan_set_cur_state(struct thermal_cooling_device *cdev,
+				     unsigned long state, bool thermal)
 {
 	struct mlxreg_fan_pwm *pwm = cdev->devdata;
 	struct mlxreg_fan *fan = pwm->fan;

@@ -369,7 +369,8 @@ static int mlxreg_fan_set_cur_state(struct thermal_cooling_device *cdev,
 		return -EINVAL;

 	/* Save thermal state. */
-	pwm->last_thermal_state = state;
+	if (thermal)
+		pwm->last_thermal_state = state;

 	state = max_t(unsigned long, state, pwm->last_hwmon_state);
 	err = regmap_write(fan->regmap, pwm->reg,

@@ -381,6 +382,13 @@ static int mlxreg_fan_set_cur_state(struct thermal_cooling_device *cdev,
 	return 0;
 }

+static int mlxreg_fan_set_cur_state(struct thermal_cooling_device *cdev,
+				    unsigned long state)
+
+{
+	return _mlxreg_fan_set_cur_state(cdev, state, true);
+}
+
 static const struct thermal_cooling_device_ops mlxreg_fan_cooling_ops = {
 	.get_max_state = mlxreg_fan_get_max_state,
 	.get_cur_state = mlxreg_fan_get_cur_state,

@@ -1638,8 +1638,9 @@ struct coresight_device *coresight_register(struct coresight_desc *desc)
 		goto out_unlock;
 	}

-	if (csdev->type == CORESIGHT_DEV_TYPE_SINK ||
-	    csdev->type == CORESIGHT_DEV_TYPE_LINKSINK) {
+	if ((csdev->type == CORESIGHT_DEV_TYPE_SINK ||
+	     csdev->type == CORESIGHT_DEV_TYPE_LINKSINK) &&
+	    sink_ops(csdev)->alloc_buffer) {
 		ret = etm_perf_add_symlink_sink(csdev);

 		if (ret) {

@@ -481,7 +481,8 @@ static int etm4_enable_hw(struct etmv4_drvdata *drvdata)
 		etm4x_relaxed_write32(csa, config->seq_rst, TRCSEQRSTEVR);
 		etm4x_relaxed_write32(csa, config->seq_state, TRCSEQSTR);
 	}
-	etm4x_relaxed_write32(csa, config->ext_inp, TRCEXTINSELR);
+	if (drvdata->numextinsel)
+		etm4x_relaxed_write32(csa, config->ext_inp, TRCEXTINSELR);
 	for (i = 0; i < drvdata->nr_cntr; i++) {
 		etm4x_relaxed_write32(csa, config->cntrldvr[i], TRCCNTRLDVRn(i));
 		etm4x_relaxed_write32(csa, config->cntr_ctrl[i], TRCCNTCTLRn(i));

@@ -1323,6 +1324,7 @@ static void etm4_init_arch_data(void *info)
 	etmidr5 = etm4x_relaxed_read32(csa, TRCIDR5);
 	/* NUMEXTIN, bits[8:0] number of external inputs implemented */
 	drvdata->nr_ext_inp = FIELD_GET(TRCIDR5_NUMEXTIN_MASK, etmidr5);
+	drvdata->numextinsel = FIELD_GET(TRCIDR5_NUMEXTINSEL_MASK, etmidr5);
 	/* TRACEIDSIZE, bits[21:16] indicates the trace ID width */
 	drvdata->trcid_size = FIELD_GET(TRCIDR5_TRACEIDSIZE_MASK, etmidr5);
 	/* ATBTRIG, bit[22] implementation can support ATB triggers? */

@@ -1750,7 +1752,9 @@ static int __etm4_cpu_save(struct etmv4_drvdata *drvdata)
 		state->trcseqrstevr = etm4x_read32(csa, TRCSEQRSTEVR);
 		state->trcseqstr = etm4x_read32(csa, TRCSEQSTR);
 	}
-	state->trcextinselr = etm4x_read32(csa, TRCEXTINSELR);
+
+	if (drvdata->numextinsel)
+		state->trcextinselr = etm4x_read32(csa, TRCEXTINSELR);

 	for (i = 0; i < drvdata->nr_cntr; i++) {
 		state->trccntrldvr[i] = etm4x_read32(csa, TRCCNTRLDVRn(i));

@@ -1882,7 +1886,8 @@ static void __etm4_cpu_restore(struct etmv4_drvdata *drvdata)
 		etm4x_relaxed_write32(csa, state->trcseqrstevr, TRCSEQRSTEVR);
 		etm4x_relaxed_write32(csa, state->trcseqstr, TRCSEQSTR);
 	}
-	etm4x_relaxed_write32(csa, state->trcextinselr, TRCEXTINSELR);
+	if (drvdata->numextinsel)
+		etm4x_relaxed_write32(csa, state->trcextinselr, TRCEXTINSELR);

 	for (i = 0; i < drvdata->nr_cntr; i++) {
 		etm4x_relaxed_write32(csa, state->trccntrldvr[i], TRCCNTRLDVRn(i));

@@ -2113,6 +2118,10 @@ static int etm4_probe(struct device *dev)
 	if (WARN_ON(!drvdata))
 		return -ENOMEM;

+	drvdata->atclk = devm_clk_get_optional_enabled(dev, "atclk");
+	if (IS_ERR(drvdata->atclk))
+		return PTR_ERR(drvdata->atclk);
+
 	if (pm_save_enable == PARAM_PM_SAVE_FIRMWARE)
 		pm_save_enable = coresight_loses_context_with_cpu(dev) ?
				 PARAM_PM_SAVE_SELF_HOSTED : PARAM_PM_SAVE_NEVER;

@@ -2364,8 +2373,8 @@ static int etm4_runtime_suspend(struct device *dev)
 {
 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev);

-	if (drvdata->pclk && !IS_ERR(drvdata->pclk))
-		clk_disable_unprepare(drvdata->pclk);
+	clk_disable_unprepare(drvdata->atclk);
+	clk_disable_unprepare(drvdata->pclk);

 	return 0;
 }

@@ -2373,11 +2382,17 @@ static int etm4_runtime_suspend(struct device *dev)
 static int etm4_runtime_resume(struct device *dev)
 {
 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev);
+	int ret;

-	if (drvdata->pclk && !IS_ERR(drvdata->pclk))
-		clk_prepare_enable(drvdata->pclk);
+	ret = clk_prepare_enable(drvdata->pclk);
+	if (ret)
+		return ret;

-	return 0;
+	ret = clk_prepare_enable(drvdata->atclk);
+	if (ret)
+		clk_disable_unprepare(drvdata->pclk);
+
+	return ret;
 }
 #endif

@@ -162,6 +162,7 @@
 #define TRCIDR4_NUMVMIDC_MASK			GENMASK(31, 28)

 #define TRCIDR5_NUMEXTIN_MASK			GENMASK(8, 0)
+#define TRCIDR5_NUMEXTINSEL_MASK		GENMASK(11, 9)
 #define TRCIDR5_TRACEIDSIZE_MASK		GENMASK(21, 16)
 #define TRCIDR5_ATBTRIG				BIT(22)
 #define TRCIDR5_LPOVERRIDE			BIT(23)

@@ -919,7 +920,8 @@ struct etmv4_save_state {

 /**
  * struct etm4_drvdata - specifics associated to an ETM component
- * @pclk	APB clock if present, otherwise NULL
+ * @pclk:	APB clock if present, otherwise NULL
+ * @atclk:	Optional clock for the core parts of the ETMv4.
  * @base:	Memory mapped base address for this component.
  * @csdev:	Component vitals needed by the framework.
  * @spinlock:	Only one at a time pls.

@@ -987,6 +989,7 @@ struct etmv4_save_state {
 */
 struct etmv4_drvdata {
 	struct clk			*pclk;
+	struct clk			*atclk;
 	void __iomem			*base;
 	struct coresight_device		*csdev;
 	spinlock_t			spinlock;

@@ -999,6 +1002,7 @@ struct etmv4_drvdata {
 	u8				nr_cntr;
 	u8				nr_ext_inp;
 	u8				numcidc;
+	u8				numextinsel;
 	u8				numvmidc;
 	u8				nrseqstate;
 	u8				nr_event;

@@ -21,7 +21,8 @@
 #include "coresight-self-hosted-trace.h"
 #include "coresight-trbe.h"

-#define PERF_IDX2OFF(idx, buf) ((idx) % ((buf)->nr_pages << PAGE_SHIFT))
+#define PERF_IDX2OFF(idx, buf)	\
+	((idx) % ((unsigned long)(buf)->nr_pages << PAGE_SHIFT))

 /*
  * A padding packet that will help the user space tools

@@ -743,12 +744,12 @@ static void *arm_trbe_alloc_buffer(struct coresight_device *csdev,

 	buf = kzalloc_node(sizeof(*buf), GFP_KERNEL, trbe_alloc_node(event));
 	if (!buf)
-		return ERR_PTR(-ENOMEM);
+		return NULL;

 	pglist = kcalloc(nr_pages, sizeof(*pglist), GFP_KERNEL);
 	if (!pglist) {
 		kfree(buf);
-		return ERR_PTR(-ENOMEM);
+		return NULL;
 	}

 	for (i = 0; i < nr_pages; i++)

@@ -758,7 +759,7 @@ static void *arm_trbe_alloc_buffer(struct coresight_device *csdev,
 	if (!buf->trbe_base) {
 		kfree(pglist);
 		kfree(buf);
-		return ERR_PTR(-ENOMEM);
+		return NULL;
 	}
 	buf->trbe_limit = buf->trbe_base + nr_pages * PAGE_SIZE;
 	buf->trbe_write = buf->trbe_base;

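nr_pages is a 32-bit field, so `nr_pages << PAGE_SHIFT` is computed in 32-bit arithmetic and wraps once the buffer reaches 4 GiB; the cast widens the operand before the shift. A demonstration (assumes 64-bit long):

    #include <stdio.h>

    #define PAGE_SHIFT 12

    int main(void)
    {
        unsigned int nr_pages = 1u << 20;   /* 1Mi pages = 4 GiB */

        unsigned long narrow = nr_pages << PAGE_SHIFT;                /* wraps to 0 */
        unsigned long wide   = (unsigned long)nr_pages << PAGE_SHIFT; /* 0x100000000 */

        printf("narrow=%#lx wide=%#lx\n", narrow, wide);
        return 0;
    }
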
@@ -394,6 +394,7 @@ static int dw_i2c_plat_probe(struct platform_device *pdev)

 exit_probe:
 	dw_i2c_plat_pm_cleanup(dev);
+	i2c_dw_prepare_clk(dev, false);
 exit_reset:
 	reset_control_assert(dev->rst);
 	return ret;

@@ -411,9 +412,11 @@ static void dw_i2c_plat_remove(struct platform_device *pdev)
 	i2c_dw_disable(dev);

 	pm_runtime_dont_use_autosuspend(device);
-	pm_runtime_put_sync(device);
+	pm_runtime_put_noidle(device);
 	dw_i2c_plat_pm_cleanup(dev);

+	i2c_dw_prepare_clk(dev, false);
+
 	i2c_dw_remove_lock_support(dev);

 	reset_control_assert(dev->rst);

@@ -1243,6 +1243,7 @@ static int mtk_i2c_transfer(struct i2c_adapter *adap,
 {
 	int ret;
 	int left_num = num;
+	bool write_then_read_en = false;
 	struct mtk_i2c *i2c = i2c_get_adapdata(adap);

 	ret = clk_bulk_enable(I2C_MT65XX_CLK_MAX, i2c->clocks);

@@ -1256,6 +1257,7 @@ static int mtk_i2c_transfer(struct i2c_adapter *adap,
 		if (!(msgs[0].flags & I2C_M_RD) && (msgs[1].flags & I2C_M_RD) &&
 		    msgs[0].addr == msgs[1].addr) {
 			i2c->auto_restart = 0;
+			write_then_read_en = true;
 		}
 	}

@@ -1280,12 +1282,10 @@ static int mtk_i2c_transfer(struct i2c_adapter *adap,
 		else
 			i2c->op = I2C_MASTER_WR;

-		if (!i2c->auto_restart) {
-			if (num > 1) {
-				/* combined two messages into one transaction */
-				i2c->op = I2C_MASTER_WRRD;
-				left_num--;
-			}
+		if (write_then_read_en) {
+			/* combined two messages into one transaction */
+			i2c->op = I2C_MASTER_WRRD;
+			left_num--;
 		}

 		/* always use DMA mode. */

@@ -1293,7 +1293,10 @@ static int mtk_i2c_transfer(struct i2c_adapter *adap,
 		if (ret < 0)
 			goto err_exit;

-		msgs++;
+		if (i2c->op == I2C_MASTER_WRRD)
+			msgs += 2;
+		else
+			msgs++;
 	}
 	/* the return value is number of executed messages */
 	ret = num;

@@ -369,6 +369,7 @@ static int svc_i3c_master_handle_ibi(struct svc_i3c_master *master,
				SVC_I3C_MSTATUS_COMPLETE(val), 0, 1000);
 	if (ret) {
 		dev_err(master->dev, "Timeout when polling for COMPLETE\n");
+		i3c_generic_ibi_recycle_slot(data->ibi_pool, slot);
 		return ret;
 	}

@@ -430,9 +431,24 @@ static void svc_i3c_master_ibi_work(struct work_struct *work)
	 */
 	writel(SVC_I3C_MINT_IBIWON, master->regs + SVC_I3C_MSTATUS);

-	/* Acknowledge the incoming interrupt with the AUTOIBI mechanism */
-	writel(SVC_I3C_MCTRL_REQUEST_AUTO_IBI |
-	       SVC_I3C_MCTRL_IBIRESP_AUTO,
+	/*
+	 * Write REQUEST_START_ADDR request to emit broadcast address for arbitration,
+	 * instend of using AUTO_IBI.
+	 *
+	 * Using AutoIBI request may cause controller to remain in AutoIBI state when
+	 * there is a glitch on SDA line (high->low->high).
+	 * 1. SDA high->low, raising an interrupt to execute IBI isr.
+	 * 2. SDA low->high.
+	 * 3. IBI isr writes an AutoIBI request.
+	 * 4. The controller will not start AutoIBI process because SDA is not low.
+	 * 5. IBIWON polling times out.
+	 * 6. Controller reamins in AutoIBI state and doesn't accept EmitStop request.
+	 */
+	writel(SVC_I3C_MCTRL_REQUEST_START_ADDR |
+	       SVC_I3C_MCTRL_TYPE_I3C |
+	       SVC_I3C_MCTRL_IBIRESP_MANUAL |
+	       SVC_I3C_MCTRL_DIR(SVC_I3C_MCTRL_DIR_WRITE) |
+	       SVC_I3C_MCTRL_ADDR(I3C_BROADCAST_ADDR),
 	       master->regs + SVC_I3C_MCTRL);

 	/* Wait for IBIWON, should take approximately 100us */

@@ -452,10 +468,15 @@ static void svc_i3c_master_ibi_work(struct work_struct *work)
 	switch (ibitype) {
 	case SVC_I3C_MSTATUS_IBITYPE_IBI:
 		dev = svc_i3c_master_dev_from_addr(master, ibiaddr);
-		if (!dev || !is_events_enabled(master, SVC_I3C_EVENT_IBI))
+		if (!dev || !is_events_enabled(master, SVC_I3C_EVENT_IBI)) {
 			svc_i3c_master_nack_ibi(master);
-		else
+		} else {
+			if (dev->info.bcr & I3C_BCR_IBI_PAYLOAD)
+				svc_i3c_master_ack_ibi(master, true);
+			else
+				svc_i3c_master_ack_ibi(master, false);
 			svc_i3c_master_handle_ibi(master, dev);
+		}
 		break;
 	case SVC_I3C_MSTATUS_IBITYPE_HOT_JOIN:
 		if (is_events_enabled(master, SVC_I3C_EVENT_HOTJOIN))

@@ -9,6 +9,7 @@
 #include <linux/mutex.h>
 #include <linux/property.h>
 #include <linux/slab.h>
+#include <linux/units.h>
 
 #include <linux/iio/iio.h>
 #include <linux/iio/iio-opaque.h>
@@ -635,7 +636,7 @@ static int iio_convert_raw_to_processed_unlocked(struct iio_channel *chan,
 {
 	int scale_type, scale_val, scale_val2;
 	int offset_type, offset_val, offset_val2;
-	s64 raw64 = raw;
+	s64 denominator, raw64 = raw;
 
 	offset_type = iio_channel_read(chan, &offset_val, &offset_val2,
 				       IIO_CHAN_INFO_OFFSET);
@@ -670,7 +671,7 @@ static int iio_convert_raw_to_processed_unlocked(struct iio_channel *chan,
 		 * If no channel scaling is available apply consumer scale to
 		 * raw value and return.
 		 */
-		*processed = raw * scale;
+		*processed = raw64 * scale;
 		return 0;
 	}
 
@@ -679,20 +680,19 @@ static int iio_convert_raw_to_processed_unlocked(struct iio_channel *chan,
 		*processed = raw64 * scale_val * scale;
 		break;
 	case IIO_VAL_INT_PLUS_MICRO:
-		if (scale_val2 < 0)
-			*processed = -raw64 * scale_val * scale;
-		else
-			*processed = raw64 * scale_val * scale;
-		*processed += div_s64(raw64 * (s64)scale_val2 * scale,
-				      1000000LL);
-		break;
 	case IIO_VAL_INT_PLUS_NANO:
-		if (scale_val2 < 0)
-			*processed = -raw64 * scale_val * scale;
-		else
-			*processed = raw64 * scale_val * scale;
-		*processed += div_s64(raw64 * (s64)scale_val2 * scale,
-				      1000000000LL);
+		switch (scale_type) {
+		case IIO_VAL_INT_PLUS_MICRO:
+			denominator = MICRO;
+			break;
+		case IIO_VAL_INT_PLUS_NANO:
+			denominator = NANO;
+			break;
+		}
+		*processed = raw64 * scale * abs(scale_val);
+		*processed += div_s64(raw64 * scale * abs(scale_val2), denominator);
+		if (scale_val < 0 || scale_val2 < 0)
+			*processed *= -1;
 		break;
 	case IIO_VAL_FRACTIONAL:
 		*processed = div_s64(raw64 * (s64)scale_val * scale,

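The rewritten INT_PLUS_MICRO/INT_PLUS_NANO case computes the magnitude first, as raw * scale * (|int| + |frac| / denominator), and applies the sign once at the end, which also handles values like -0.5 whose integer part is zero. A worked userspace example of the same arithmetic follows; the readings and scale values are made up, and this is plain C rather than the kernel API.

#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>

#define MICRO 1000000LL

int main(void)
{
	int64_t raw64 = 200;               /* raw reading (assumed) */
	int64_t scale = 10;                /* consumer scale multiplier (assumed) */
	int scale_val = -1, scale_val2 = -500000;   /* encodes -1.5 */
	int64_t processed;

	/* magnitude first: raw * scale * (|int| + |frac| / denominator) */
	processed  = raw64 * scale * llabs(scale_val);
	processed += raw64 * scale * llabs(scale_val2) / MICRO;

	/* sign applied once; either field may carry it (e.g. -0.5) */
	if (scale_val < 0 || scale_val2 < 0)
		processed = -processed;

	printf("%" PRId64 "\n", processed);   /* -3000, i.e. 200 * 10 * -1.5 */
	return 0;
}
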
@@ -460,14 +460,10 @@ static int addr_resolve_neigh(const struct dst_entry *dst,
 {
 	int ret = 0;
 
-	if (ndev_flags & IFF_LOOPBACK) {
+	if (ndev_flags & IFF_LOOPBACK)
 		memcpy(addr->dst_dev_addr, addr->src_dev_addr, MAX_ADDR_LEN);
-	} else {
-		if (!(ndev_flags & IFF_NOARP)) {
-			/* If the device doesn't do ARP internally */
-			ret = fetch_ha(dst, addr, dst_in, seq);
-		}
-	}
+	else
+		ret = fetch_ha(dst, addr, dst_in, seq);
 	return ret;
 }
 

@@ -1032,8 +1032,8 @@ static noinline void cm_destroy_id_wait_timeout(struct ib_cm_id *cm_id,
 	struct cm_id_private *cm_id_priv;
 
 	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
-	pr_err("%s: cm_id=%p timed out. state %d -> %d, refcnt=%d\n", __func__,
-	       cm_id, old_state, cm_id->state, refcount_read(&cm_id_priv->refcount));
+	pr_err_ratelimited("%s: cm_id=%p timed out. state %d -> %d, refcnt=%d\n", __func__,
+			   cm_id, old_state, cm_id->state, refcount_read(&cm_id_priv->refcount));
 }
 
 static void cm_destroy_id(struct ib_cm_id *cm_id, int err)

@@ -1013,6 +1013,8 @@ int ib_nl_handle_set_timeout(struct sk_buff *skb,
 	if (timeout > IB_SA_LOCAL_SVC_TIMEOUT_MAX)
 		timeout = IB_SA_LOCAL_SVC_TIMEOUT_MAX;
 
+	spin_lock_irqsave(&ib_nl_request_lock, flags);
+
 	delta = timeout - sa_local_svc_timeout_ms;
 	if (delta < 0)
 		abs_delta = -delta;
@@ -1020,7 +1022,6 @@ int ib_nl_handle_set_timeout(struct sk_buff *skb,
 		abs_delta = delta;
 
 	if (delta != 0) {
-		spin_lock_irqsave(&ib_nl_request_lock, flags);
 		sa_local_svc_timeout_ms = timeout;
 		list_for_each_entry(query, &ib_nl_request_list, list) {
 			if (delta < 0 && abs_delta > query->timeout)
@@ -1038,9 +1039,10 @@ int ib_nl_handle_set_timeout(struct sk_buff *skb,
 		if (delay)
 			mod_delayed_work(ib_nl_wq, &ib_nl_timed_work,
 					 (unsigned long)delay);
-		spin_unlock_irqrestore(&ib_nl_request_lock, flags);
 	}
 
+	spin_unlock_irqrestore(&ib_nl_request_lock, flags);
+
 settimeout_out:
 	return 0;
 }

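Taken together, the three hunks widen the lock so that the delta computed from sa_local_svc_timeout_ms and the list walk that consumes it happen inside one critical section, instead of reading the shared timeout unlocked first. A minimal pthread sketch of the same discipline, with invented names:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static long shared_timeout_ms = 2000;

static void set_timeout(long timeout)
{
	long delta;

	pthread_mutex_lock(&lock);      /* held before the old value is read */
	delta = timeout - shared_timeout_ms;
	if (delta != 0) {
		shared_timeout_ms = timeout;
		/* ...retune queued requests by delta in the same section... */
	}
	pthread_mutex_unlock(&lock);    /* dropped only after every use of delta */
}

int main(void)
{
	set_timeout(3000);
	printf("timeout now %ld ms\n", shared_timeout_ms);
	return 0;
}
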
@@ -1691,7 +1691,8 @@ static void deallocate_uars(struct mlx5_ib_dev *dev,
 }
 
 static int mlx5_ib_enable_lb_mp(struct mlx5_core_dev *master,
-				struct mlx5_core_dev *slave)
+				struct mlx5_core_dev *slave,
+				struct mlx5_ib_lb_state *lb_state)
 {
 	int err;
 
@@ -1703,6 +1704,7 @@ static int mlx5_ib_enable_lb_mp(struct mlx5_core_dev *master,
 	if (err)
 		goto out;
 
+	lb_state->force_enable = true;
 	return 0;
 
 out:
@@ -1711,16 +1713,22 @@ out:
 }
 
 static void mlx5_ib_disable_lb_mp(struct mlx5_core_dev *master,
-				  struct mlx5_core_dev *slave)
+				  struct mlx5_core_dev *slave,
+				  struct mlx5_ib_lb_state *lb_state)
 {
 	mlx5_nic_vport_update_local_lb(slave, false);
 	mlx5_nic_vport_update_local_lb(master, false);
+
+	lb_state->force_enable = false;
 }
 
 int mlx5_ib_enable_lb(struct mlx5_ib_dev *dev, bool td, bool qp)
 {
 	int err = 0;
 
+	if (dev->lb.force_enable)
+		return 0;
+
 	mutex_lock(&dev->lb.mutex);
 	if (td)
 		dev->lb.user_td++;
@@ -1742,6 +1750,9 @@ int mlx5_ib_enable_lb(struct mlx5_ib_dev *dev, bool td, bool qp)
 
 void mlx5_ib_disable_lb(struct mlx5_ib_dev *dev, bool td, bool qp)
 {
+	if (dev->lb.force_enable)
+		return;
+
 	mutex_lock(&dev->lb.mutex);
 	if (td)
 		dev->lb.user_td--;
@@ -3251,7 +3262,7 @@ static void mlx5_ib_unbind_slave_port(struct mlx5_ib_dev *ibdev,
 
 	lockdep_assert_held(&mlx5_ib_multiport_mutex);
 
-	mlx5_ib_disable_lb_mp(ibdev->mdev, mpi->mdev);
+	mlx5_ib_disable_lb_mp(ibdev->mdev, mpi->mdev, &ibdev->lb);
 
 	mlx5_core_mp_event_replay(ibdev->mdev,
 				  MLX5_DRIVER_EVENT_AFFILIATION_REMOVED,
@@ -3348,7 +3359,7 @@ static bool mlx5_ib_bind_slave_port(struct mlx5_ib_dev *ibdev,
 				  MLX5_DRIVER_EVENT_AFFILIATION_DONE,
 				  &key);
 
-	err = mlx5_ib_enable_lb_mp(ibdev->mdev, mpi->mdev);
+	err = mlx5_ib_enable_lb_mp(ibdev->mdev, mpi->mdev, &ibdev->lb);
 	if (err)
 		goto unbind;
 

@@ -1065,6 +1065,7 @@ struct mlx5_ib_lb_state {
 	u32 user_td;
 	int qps;
 	bool enabled;
+	bool force_enable;
 };
 
 struct mlx5_ib_pf_eq {

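The new force_enable flag makes the refcounted enable/disable paths no-ops while multiport loopback is forced on, so per-TD/QP accounting cannot switch loopback back off underneath the bound ports. A toy model of that interaction, with invented names rather than the mlx5 API:

#include <stdbool.h>
#include <stdio.h>

struct lb_state {
	int users;
	bool enabled;
	bool force_enable;
};

static void lb_enable(struct lb_state *lb)
{
	if (lb->force_enable)
		return;                 /* accounting suspended while forced */
	if (lb->users++ == 0)
		lb->enabled = true;
}

static void lb_disable(struct lb_state *lb)
{
	if (lb->force_enable)
		return;
	if (--lb->users == 0)
		lb->enabled = false;
}

int main(void)
{
	struct lb_state lb = { .force_enable = true, .enabled = true };

	lb_enable(&lb);
	lb_disable(&lb);                /* without the flag this would disable it */
	printf("enabled=%d\n", lb.enabled);   /* stays 1 */
	return 0;
}
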
@@ -132,8 +132,12 @@ static void do_task(struct rxe_task *task)
 			 * yield the cpu and reschedule the task
 			 */
 			if (!ret) {
-				task->state = TASK_STATE_IDLE;
-				resched = 1;
+				if (task->state != TASK_STATE_DRAINING) {
+					task->state = TASK_STATE_IDLE;
+					resched = 1;
+				} else {
+					cont = 1;
+				}
 				goto exit;
 			}
 

@@ -761,7 +761,7 @@ int siw_post_send(struct ib_qp *base_qp, const struct ib_send_wr *wr,
 	struct siw_wqe *wqe = tx_wqe(qp);
 
 	unsigned long flags;
-	int rv = 0;
+	int rv = 0, imm_err = 0;
 
 	if (wr && !rdma_is_kernel_res(&qp->base_qp.res)) {
 		siw_dbg_qp(qp, "wr must be empty for user mapped sq\n");
@@ -947,9 +947,17 @@ int siw_post_send(struct ib_qp *base_qp, const struct ib_send_wr *wr,
 	 * Send directly if SQ processing is not in progress.
 	 * Eventual immediate errors (rv < 0) do not affect the involved
 	 * RI resources (Verbs, 8.3.1) and thus do not prevent from SQ
-	 * processing, if new work is already pending. But rv must be passed
-	 * to caller.
+	 * processing, if new work is already pending. But rv and pointer
+	 * to failed work request must be passed to caller.
 	 */
+	if (unlikely(rv < 0)) {
+		/*
+		 * Immediate error
+		 */
+		siw_dbg_qp(qp, "Immediate error %d\n", rv);
+		imm_err = rv;
+		*bad_wr = wr;
+	}
 	if (wqe->wr_status != SIW_WR_IDLE) {
 		spin_unlock_irqrestore(&qp->sq_lock, flags);
 		goto skip_direct_sending;
@@ -974,15 +982,10 @@ skip_direct_sending:
 
 	up_read(&qp->state_lock);
 
-	if (rv >= 0)
-		return 0;
-	/*
-	 * Immediate error
-	 */
-	siw_dbg_qp(qp, "error %d\n", rv);
+	if (unlikely(imm_err))
+		return imm_err;
 
-	*bad_wr = wr;
-	return rv;
+	return (rv >= 0) ? 0 : rv;
 }
 
 /*

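The reshuffle latches an immediate error (imm_err plus *bad_wr) at the point of failure but still lets already-queued SQ work proceed, per the Verbs rule cited in the comment, and the latched error wins when returning. A sketch of that reporting convention on an invented work-request list:

#include <stddef.h>
#include <stdio.h>

struct wr { int id; int bad; struct wr *next; };

static int post_list(struct wr *wr, struct wr **bad_wr)
{
	int rv = 0, imm_err = 0;

	for (; wr; wr = wr->next) {
		if (wr->bad) {          /* immediate error: latch and stop */
			imm_err = -22;  /* -EINVAL */
			*bad_wr = wr;
			break;
		}
		/* ...enqueue wr... */
	}

	/* kick processing for whatever was enqueued before the error */

	return imm_err ? imm_err : rv;
}

int main(void)
{
	struct wr c = { 3, 0, NULL }, b = { 2, 1, &c }, a = { 1, 0, &b };
	struct wr *bad = NULL;
	int rv = post_list(&a, &bad);

	printf("rv=%d, first bad wr id=%d\n", rv, bad ? bad->id : 0);
	return 0;
}
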
@@ -775,6 +775,7 @@ static int uinput_ff_upload_to_user(char __user *buffer,
 	if (in_compat_syscall()) {
 		struct uinput_ff_upload_compat ff_up_compat;
 
+		memset(&ff_up_compat, 0, sizeof(ff_up_compat));
 		ff_up_compat.request_id = ff_up->request_id;
 		ff_up_compat.retval = ff_up->retval;
 		/*

@@ -3324,7 +3324,7 @@ static int mxt_probe(struct i2c_client *client)
 	if (data->reset_gpio) {
 		/* Wait a while and then de-assert the RESET GPIO line */
 		msleep(MXT_RESET_GPIO_TIME);
-		gpiod_set_value(data->reset_gpio, 0);
+		gpiod_set_value_cansleep(data->reset_gpio, 0);
 		msleep(MXT_RESET_INVALID_CHG);
 	}
 

@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2022, 2024-2025 Qualcomm Innovation Center, Inc. All rights reserved.
  */
 
 #include <linux/bitfield.h>
@@ -114,36 +114,39 @@ enum {
 	REG_THERM_THRSH1,
 	REG_THERM_THRSH2,
 	REG_THERM_THRSH3,
+	REG_TORCH_CLAMP,
 	REG_MAX_COUNT,
 };
 
 static const struct reg_field mvflash_3ch_regs[REG_MAX_COUNT] = {
-	REG_FIELD(0x08, 0, 7),			/* status1 */
-	REG_FIELD(0x09, 0, 7),			/* status2 */
-	REG_FIELD(0x0a, 0, 7),			/* status3 */
-	REG_FIELD_ID(0x40, 0, 7, 3, 1),		/* chan_timer */
-	REG_FIELD_ID(0x43, 0, 6, 3, 1),		/* itarget */
-	REG_FIELD(0x46, 7, 7),			/* module_en */
-	REG_FIELD(0x47, 0, 5),			/* iresolution */
-	REG_FIELD_ID(0x49, 0, 2, 3, 1),		/* chan_strobe */
-	REG_FIELD(0x4c, 0, 2),			/* chan_en */
-	REG_FIELD(0x56, 0, 2),			/* therm_thrsh1 */
-	REG_FIELD(0x57, 0, 2),			/* therm_thrsh2 */
-	REG_FIELD(0x58, 0, 2),			/* therm_thrsh3 */
+	[REG_STATUS1]      = REG_FIELD(0x08, 0, 7),
+	[REG_STATUS2]      = REG_FIELD(0x09, 0, 7),
+	[REG_STATUS3]      = REG_FIELD(0x0a, 0, 7),
+	[REG_CHAN_TIMER]   = REG_FIELD_ID(0x40, 0, 7, 3, 1),
+	[REG_ITARGET]      = REG_FIELD_ID(0x43, 0, 6, 3, 1),
+	[REG_MODULE_EN]    = REG_FIELD(0x46, 7, 7),
+	[REG_IRESOLUTION]  = REG_FIELD(0x47, 0, 5),
+	[REG_CHAN_STROBE]  = REG_FIELD_ID(0x49, 0, 2, 3, 1),
+	[REG_CHAN_EN]      = REG_FIELD(0x4c, 0, 2),
+	[REG_THERM_THRSH1] = REG_FIELD(0x56, 0, 2),
+	[REG_THERM_THRSH2] = REG_FIELD(0x57, 0, 2),
+	[REG_THERM_THRSH3] = REG_FIELD(0x58, 0, 2),
+	[REG_TORCH_CLAMP]  = REG_FIELD(0xec, 0, 6),
 };
 
 static const struct reg_field mvflash_4ch_regs[REG_MAX_COUNT] = {
-	REG_FIELD(0x06, 0, 7),			/* status1 */
-	REG_FIELD(0x07, 0, 6),			/* status2 */
-	REG_FIELD(0x09, 0, 7),			/* status3 */
-	REG_FIELD_ID(0x3e, 0, 7, 4, 1),		/* chan_timer */
-	REG_FIELD_ID(0x42, 0, 6, 4, 1),		/* itarget */
-	REG_FIELD(0x46, 7, 7),			/* module_en */
-	REG_FIELD(0x49, 0, 3),			/* iresolution */
-	REG_FIELD_ID(0x4a, 0, 6, 4, 1),		/* chan_strobe */
-	REG_FIELD(0x4e, 0, 3),			/* chan_en */
-	REG_FIELD(0x7a, 0, 2),			/* therm_thrsh1 */
-	REG_FIELD(0x78, 0, 2),			/* therm_thrsh2 */
+	[REG_STATUS1]      = REG_FIELD(0x06, 0, 7),
+	[REG_STATUS2]      = REG_FIELD(0x07, 0, 6),
+	[REG_STATUS3]      = REG_FIELD(0x09, 0, 7),
+	[REG_CHAN_TIMER]   = REG_FIELD_ID(0x3e, 0, 7, 4, 1),
+	[REG_ITARGET]      = REG_FIELD_ID(0x42, 0, 6, 4, 1),
+	[REG_MODULE_EN]    = REG_FIELD(0x46, 7, 7),
+	[REG_IRESOLUTION]  = REG_FIELD(0x49, 0, 3),
+	[REG_CHAN_STROBE]  = REG_FIELD_ID(0x4a, 0, 6, 4, 1),
+	[REG_CHAN_EN]      = REG_FIELD(0x4e, 0, 3),
+	[REG_THERM_THRSH1] = REG_FIELD(0x7a, 0, 2),
+	[REG_THERM_THRSH2] = REG_FIELD(0x78, 0, 2),
+	[REG_TORCH_CLAMP]  = REG_FIELD(0xed, 0, 6),
 };
 
 struct qcom_flash_data {
@@ -156,6 +159,7 @@ struct qcom_flash_data {
 	u8			max_channels;
 	u8			chan_en_bits;
 	u8			revision;
+	u8			torch_clamp;
 };
 
 struct qcom_flash_led {
@@ -702,6 +706,7 @@ static int qcom_flash_register_led_device(struct device *dev,
 	u32 current_ua, timeout_us;
 	u32 channels[4];
 	int i, rc, count;
+	u8 torch_clamp;
 
 	count = fwnode_property_count_u32(node, "led-sources");
 	if (count <= 0) {
@@ -751,6 +756,12 @@ static int qcom_flash_register_led_device(struct device *dev,
 	current_ua = min_t(u32, current_ua, TORCH_CURRENT_MAX_UA * led->chan_count);
 	led->max_torch_current_ma = current_ua / UA_PER_MA;
 
+	torch_clamp = (current_ua / led->chan_count) / TORCH_IRES_UA;
+	if (torch_clamp != 0)
+		torch_clamp--;
+
+	flash_data->torch_clamp = max_t(u8, flash_data->torch_clamp, torch_clamp);
+
 	if (fwnode_property_present(node, "flash-max-microamp")) {
 		flash->led_cdev.flags |= LED_DEV_CAP_FLASH;
 
@@ -918,8 +929,7 @@ static int qcom_flash_led_probe(struct platform_device *pdev)
 		flash_data->leds_count++;
 	}
 
-	return 0;
-
+	return regmap_field_write(flash_data->r_fields[REG_TORCH_CLAMP], flash_data->torch_clamp);
 release:
 	fwnode_handle_put(child);
 	while (flash_data->v4l2_flash[flash_data->leds_count] && flash_data->leds_count)

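The table rewrite is not cosmetic: with positional initializers, adding REG_TORCH_CLAMP to the enum would silently shift every later array entry, while designated initializers keep index and value paired no matter where a member is inserted. A tiny illustration with invented registers:

#include <stdio.h>

enum { REG_STATUS, REG_ENABLE, REG_CLAMP, REG_MAX };

struct field { unsigned addr, lsb, msb; };

static const struct field regs[REG_MAX] = {
	[REG_STATUS] = { 0x08, 0, 7 },
	[REG_ENABLE] = { 0x46, 7, 7 },
	[REG_CLAMP]  = { 0xec, 0, 6 },   /* new entry, order-independent */
};

int main(void)
{
	printf("clamp reg at 0x%02x\n", regs[REG_CLAMP].addr);
	return 0;
}
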
@@ -161,6 +161,7 @@ struct mapped_device {
 #define DMF_SUSPENDED_INTERNALLY 7
 #define DMF_POST_SUSPENDING 8
 #define DMF_EMULATE_ZONE_APPEND 9
+#define DMF_QUEUE_STOPPED 10
 
 void disable_discard(struct mapped_device *md);
 void disable_write_zeroes(struct mapped_device *md);

@@ -2688,7 +2688,7 @@ static int __dm_suspend(struct mapped_device *md, struct dm_table *map,
 {
 	bool do_lockfs = suspend_flags & DM_SUSPEND_LOCKFS_FLAG;
 	bool noflush = suspend_flags & DM_SUSPEND_NOFLUSH_FLAG;
-	int r;
+	int r = 0;
 
 	lockdep_assert_held(&md->suspend_lock);
 
@@ -2740,8 +2740,10 @@ static int __dm_suspend(struct mapped_device *md, struct dm_table *map,
 	 * Stop md->queue before flushing md->wq in case request-based
 	 * dm defers requests to md->wq from md->queue.
 	 */
-	if (dm_request_based(md))
+	if (map && dm_request_based(md)) {
 		dm_stop_queue(md->queue);
+		set_bit(DMF_QUEUE_STOPPED, &md->flags);
+	}
 
 	flush_workqueue(md->wq);
 
@@ -2750,7 +2752,8 @@ static int __dm_suspend(struct mapped_device *md, struct dm_table *map,
 	 * We call dm_wait_for_completion to wait for all existing requests
 	 * to finish.
 	 */
-	r = dm_wait_for_completion(md, task_state);
+	if (map)
+		r = dm_wait_for_completion(md, task_state);
 	if (!r)
 		set_bit(dmf_suspended_flag, &md->flags);
 
@@ -2763,7 +2766,7 @@ static int __dm_suspend(struct mapped_device *md, struct dm_table *map,
 	if (r < 0) {
 		dm_queue_flush(md);
 
-		if (dm_request_based(md))
+		if (test_and_clear_bit(DMF_QUEUE_STOPPED, &md->flags))
 			dm_start_queue(md->queue);
 
 		unlock_fs(md);
@@ -2847,7 +2850,7 @@ static int __dm_resume(struct mapped_device *md, struct dm_table *map)
 	 * so that mapping of targets can work correctly.
 	 * Request-based dm is queueing the deferred I/Os in its request_queue.
 	 */
-	if (dm_request_based(md))
+	if (test_and_clear_bit(DMF_QUEUE_STOPPED, &md->flags))
 		dm_start_queue(md->queue);
 
 	unlock_fs(md);

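Because suspend can now skip stopping the queue (no map present), the resume and error paths must start only what this suspend actually stopped; that is what pairing set_bit() with test_and_clear_bit() on DMF_QUEUE_STOPPED guarantees. A userspace sketch of the pairing using C11 atomics in place of the kernel bitops:

#include <stdatomic.h>
#include <stdio.h>

#define QUEUE_STOPPED (1u << 0)

static atomic_uint flags;

static void suspend_path(int have_map)
{
	if (have_map) {
		/* stop_queue(); */
		atomic_fetch_or(&flags, QUEUE_STOPPED);
	}
}

static void resume_path(void)
{
	/* start only if the matching suspend really stopped the queue */
	if (atomic_fetch_and(&flags, ~QUEUE_STOPPED) & QUEUE_STOPPED)
		printf("start_queue()\n");
	else
		printf("nothing to do\n");
}

int main(void)
{
	suspend_path(0);
	resume_path();          /* nothing to do */
	suspend_path(1);
	resume_path();          /* start_queue() */
	return 0;
}
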
@@ -1331,10 +1331,13 @@ static int rj54n1_probe(struct i2c_client *client)
 			  V4L2_CID_GAIN, 0, 127, 1, 66);
 	v4l2_ctrl_new_std(&rj54n1->hdl, &rj54n1_ctrl_ops,
 			  V4L2_CID_AUTO_WHITE_BALANCE, 0, 1, 1, 1);
-	rj54n1->subdev.ctrl_handler = &rj54n1->hdl;
-	if (rj54n1->hdl.error)
-		return rj54n1->hdl.error;
 
+	if (rj54n1->hdl.error) {
+		ret = rj54n1->hdl.error;
+		goto err_free_ctrl;
+	}
+
+	rj54n1->subdev.ctrl_handler = &rj54n1->hdl;
 	rj54n1->clk_div = clk_div;
 	rj54n1->rect.left = RJ54N1_COLUMN_SKIP;
 	rj54n1->rect.top = RJ54N1_ROW_SKIP;

@@ -154,12 +154,6 @@ struct zoran_jpg_settings {
 
 struct zoran;
 
-/* zoran_fh contains per-open() settings */
-struct zoran_fh {
-	struct v4l2_fh fh;
-	struct zoran *zr;
-};
-
 struct card_info {
 	enum card_type type;
 	char name[32];
@@ -511,12 +511,11 @@ static int zoran_s_fmt_vid_cap(struct file *file, void *__fh,
 			       struct v4l2_format *fmt)
 {
 	struct zoran *zr = video_drvdata(file);
-	struct zoran_fh *fh = __fh;
 	int i;
 	int res = 0;
 
 	if (fmt->fmt.pix.pixelformat == V4L2_PIX_FMT_MJPEG)
-		return zoran_s_fmt_vid_out(file, fh, fmt);
+		return zoran_s_fmt_vid_out(file, __fh, fmt);
 
 	for (i = 0; i < NUM_FORMATS; i++)
 		if (fmt->fmt.pix.pixelformat == zoran_formats[i].fourcc)

@@ -239,7 +239,7 @@ static int delta_mjpeg_ipc_open(struct delta_ctx *pctx)
 	return 0;
 }
 
-static int delta_mjpeg_ipc_decode(struct delta_ctx *pctx, struct delta_au *au)
+static int delta_mjpeg_ipc_decode(struct delta_ctx *pctx, dma_addr_t pstart, dma_addr_t pend)
 {
 	struct delta_dev *delta = pctx->dev;
 	struct delta_mjpeg_ctx *ctx = to_ctx(pctx);
@@ -256,8 +256,8 @@ static int delta_mjpeg_ipc_decode(struct delta_ctx *pctx, dma_addr_t pstart, dma_addr_t pend)
 
 	memset(params, 0, sizeof(*params));
 
-	params->picture_start_addr_p = (u32)(au->paddr);
-	params->picture_end_addr_p = (u32)(au->paddr + au->size - 1);
+	params->picture_start_addr_p = pstart;
+	params->picture_end_addr_p = pend;
 
 	/*
 	 * !WARNING!
@@ -374,12 +374,14 @@ static int delta_mjpeg_decode(struct delta_ctx *pctx, struct delta_au *pau)
 	struct delta_dev *delta = pctx->dev;
 	struct delta_mjpeg_ctx *ctx = to_ctx(pctx);
 	int ret;
-	struct delta_au au = *pau;
+	void *au_vaddr = pau->vaddr;
+	dma_addr_t au_dma = pau->paddr;
+	size_t au_size = pau->size;
 	unsigned int data_offset = 0;
 	struct mjpeg_header *header = &ctx->header_struct;
 
 	if (!ctx->header) {
-		ret = delta_mjpeg_read_header(pctx, au.vaddr, au.size,
+		ret = delta_mjpeg_read_header(pctx, au_vaddr, au_size,
 					      header, &data_offset);
 		if (ret) {
 			pctx->stream_errors++;
@@ -405,17 +407,17 @@ static int delta_mjpeg_decode(struct delta_ctx *pctx, struct delta_au *pau)
 		goto err;
 	}
 
-	ret = delta_mjpeg_read_header(pctx, au.vaddr, au.size,
+	ret = delta_mjpeg_read_header(pctx, au_vaddr, au_size,
 				      ctx->header, &data_offset);
 	if (ret) {
 		pctx->stream_errors++;
 		goto err;
 	}
 
-	au.paddr += data_offset;
-	au.vaddr += data_offset;
+	au_dma += data_offset;
+	au_vaddr += data_offset;
 
-	ret = delta_mjpeg_ipc_decode(pctx, &au);
+	ret = delta_mjpeg_ipc_decode(pctx, au_dma, au_dma + au_size - 1);
 	if (ret)
 		goto err;
 

@@ -32,7 +32,7 @@ static const unsigned long rz_mtu3_8bit_ch_reg_offs[][13] = {
 	[RZ_MTU3_CHAN_2] = MTU_8BIT_CH_1_2(0x204, 0x092, 0x205, 0x200, 0x20c, 0x201, 0x202),
 	[RZ_MTU3_CHAN_3] = MTU_8BIT_CH_3_4_6_7(0x008, 0x093, 0x02c, 0x000, 0x04c, 0x002, 0x004, 0x005, 0x038),
 	[RZ_MTU3_CHAN_4] = MTU_8BIT_CH_3_4_6_7(0x009, 0x094, 0x02d, 0x001, 0x04d, 0x003, 0x006, 0x007, 0x039),
-	[RZ_MTU3_CHAN_5] = MTU_8BIT_CH_5(0xab2, 0x1eb, 0xab4, 0xab6, 0xa84, 0xa85, 0xa86, 0xa94, 0xa95, 0xa96, 0xaa4, 0xaa5, 0xaa6),
+	[RZ_MTU3_CHAN_5] = MTU_8BIT_CH_5(0xab2, 0x895, 0xab4, 0xab6, 0xa84, 0xa85, 0xa86, 0xa94, 0xa95, 0xa96, 0xaa4, 0xaa5, 0xaa6),
 	[RZ_MTU3_CHAN_6] = MTU_8BIT_CH_3_4_6_7(0x808, 0x893, 0x82c, 0x800, 0x84c, 0x802, 0x804, 0x805, 0x838),
 	[RZ_MTU3_CHAN_7] = MTU_8BIT_CH_3_4_6_7(0x809, 0x894, 0x82d, 0x801, 0x84d, 0x803, 0x806, 0x807, 0x839),
 	[RZ_MTU3_CHAN_8] = MTU_8BIT_CH_8(0x404, 0x098, 0x400, 0x406, 0x401, 0x402, 0x403)

@@ -90,6 +90,7 @@ static int vexpress_sysreg_probe(struct platform_device *pdev)
 	struct resource *mem;
 	void __iomem *base;
 	struct gpio_chip *mmc_gpio_chip;
+	int ret;
 
 	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	if (!mem)
@@ -110,7 +111,10 @@ static int vexpress_sysreg_probe(struct platform_device *pdev)
 	bgpio_init(mmc_gpio_chip, &pdev->dev, 0x4, base + SYS_MCI,
 		   NULL, NULL, NULL, NULL, 0);
 	mmc_gpio_chip->ngpio = 2;
-	devm_gpiochip_add_data(&pdev->dev, mmc_gpio_chip, NULL);
+
+	ret = devm_gpiochip_add_data(&pdev->dev, mmc_gpio_chip, NULL);
+	if (ret)
+		return ret;
 
 	return devm_mfd_add_devices(&pdev->dev, PLATFORM_DEVID_AUTO,
 				    vexpress_sysreg_cells,

@@ -362,26 +362,21 @@ static int fastrpc_map_get(struct fastrpc_map *map)
 
 
 static int fastrpc_map_lookup(struct fastrpc_user *fl, int fd,
-			      struct fastrpc_map **ppmap, bool take_ref)
+			      struct fastrpc_map **ppmap)
 {
-	struct fastrpc_session_ctx *sess = fl->sctx;
 	struct fastrpc_map *map = NULL;
+	struct dma_buf *buf;
 	int ret = -ENOENT;
 
+	buf = dma_buf_get(fd);
+	if (IS_ERR(buf))
+		return PTR_ERR(buf);
+
 	spin_lock(&fl->lock);
 	list_for_each_entry(map, &fl->maps, node) {
-		if (map->fd != fd)
+		if (map->fd != fd || map->buf != buf)
 			continue;
 
-		if (take_ref) {
-			ret = fastrpc_map_get(map);
-			if (ret) {
-				dev_dbg(sess->dev, "%s: Failed to get map fd=%d ret=%d\n",
-					__func__, fd, ret);
-				break;
-			}
-		}
-
 		*ppmap = map;
 		ret = 0;
 		break;
@@ -751,7 +746,7 @@ static const struct dma_buf_ops fastrpc_dma_buf_ops = {
 	.release = fastrpc_release,
 };
 
-static int fastrpc_map_create(struct fastrpc_user *fl, int fd,
+static int fastrpc_map_attach(struct fastrpc_user *fl, int fd,
 			      u64 len, u32 attr, struct fastrpc_map **ppmap)
 {
 	struct fastrpc_session_ctx *sess = fl->sctx;
@@ -759,9 +754,6 @@ static int fastrpc_map_attach(struct fastrpc_user *fl, int fd,
 	struct sg_table *table;
 	int err = 0;
 
-	if (!fastrpc_map_lookup(fl, fd, ppmap, true))
-		return 0;
-
 	map = kzalloc(sizeof(*map), GFP_KERNEL);
 	if (!map)
 		return -ENOMEM;
@@ -838,6 +830,24 @@ get_err:
 	return err;
 }
 
+static int fastrpc_map_create(struct fastrpc_user *fl, int fd,
+			      u64 len, u32 attr, struct fastrpc_map **ppmap)
+{
+	struct fastrpc_session_ctx *sess = fl->sctx;
+	int err = 0;
+
+	if (!fastrpc_map_lookup(fl, fd, ppmap)) {
+		if (!fastrpc_map_get(*ppmap))
+			return 0;
+		dev_dbg(sess->dev, "%s: Failed to get map fd=%d\n",
+			__func__, fd);
+	}
+
+	err = fastrpc_map_attach(fl, fd, len, attr, ppmap);
+
+	return err;
+}
+
 /*
  * Fastrpc payload buffer with metadata looks like:
  *
@@ -910,8 +920,12 @@ static int fastrpc_create_maps(struct fastrpc_invoke_ctx *ctx)
 		    ctx->args[i].length == 0)
 			continue;
 
-		err = fastrpc_map_create(ctx->fl, ctx->args[i].fd,
-			 ctx->args[i].length, ctx->args[i].attr, &ctx->maps[i]);
+		if (i < ctx->nbufs)
+			err = fastrpc_map_create(ctx->fl, ctx->args[i].fd,
+				 ctx->args[i].length, ctx->args[i].attr, &ctx->maps[i]);
+		else
+			err = fastrpc_map_attach(ctx->fl, ctx->args[i].fd,
+				 ctx->args[i].length, ctx->args[i].attr, &ctx->maps[i]);
 		if (err) {
 			dev_err(dev, "Error Creating map %d\n", err);
 			return -EINVAL;
@@ -1067,6 +1081,7 @@ static int fastrpc_put_args(struct fastrpc_invoke_ctx *ctx,
 	struct fastrpc_phy_page *pages;
 	u64 *fdlist;
 	int i, inbufs, outbufs, handles;
+	int ret = 0;
 
 	inbufs = REMOTE_SCALARS_INBUFS(ctx->sc);
 	outbufs = REMOTE_SCALARS_OUTBUFS(ctx->sc);
@@ -1082,23 +1097,26 @@ static int fastrpc_put_args(struct fastrpc_invoke_ctx *ctx,
 			u64 len = rpra[i].buf.len;
 
 			if (!kernel) {
-				if (copy_to_user((void __user *)dst, src, len))
-					return -EFAULT;
+				if (copy_to_user((void __user *)dst, src, len)) {
+					ret = -EFAULT;
+					goto cleanup_fdlist;
+				}
 			} else {
 				memcpy(dst, src, len);
 			}
 		}
 	}
 
+cleanup_fdlist:
 	/* Clean up fdlist which is updated by DSP */
 	for (i = 0; i < FASTRPC_MAX_FDLIST; i++) {
 		if (!fdlist[i])
 			break;
-		if (!fastrpc_map_lookup(fl, (int)fdlist[i], &mmap, false))
+		if (!fastrpc_map_lookup(fl, (int)fdlist[i], &mmap))
 			fastrpc_map_put(mmap);
 	}
 
-	return 0;
+	return ret;
 }
 
 static int fastrpc_invoke_send(struct fastrpc_session_ctx *sctx,

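The fastrpc rework splits lookup (now keyed on both the fd and the backing dma_buf) from attach, and map_create falls back to attaching a fresh map when the found one is mid-teardown and its refcount cannot be taken. Below is a generic sketch of that lookup-or-attach pattern with an invented cache and deliberately simplified locking; it is not the fastrpc code itself.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct entry { int fd; int refs; struct entry *next; };

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct entry *cache;

/* find an entry and take a reference, unless it is already dying */
static struct entry *lookup_get(int fd)
{
	struct entry *e;

	pthread_mutex_lock(&lock);
	for (e = cache; e; e = e->next) {
		if (e->fd != fd)
			continue;
		if (e->refs == 0)       /* found, but mid-teardown: skip it */
			e = NULL;
		else
			e->refs++;
		break;
	}
	pthread_mutex_unlock(&lock);
	return e;
}

/* reuse an existing mapping when possible, otherwise attach a new one */
static struct entry *get_or_attach(int fd)
{
	struct entry *e = lookup_get(fd);

	if (e)
		return e;

	e = calloc(1, sizeof(*e));
	if (!e)
		return NULL;
	e->fd = fd;
	e->refs = 1;
	pthread_mutex_lock(&lock);
	e->next = cache;
	cache = e;
	pthread_mutex_unlock(&lock);
	return e;
}

int main(void)
{
	struct entry *a = get_or_attach(3), *b = get_or_attach(3);

	printf("same entry: %d, refs=%d\n", a == b, a ? a->refs : 0);
	return 0;
}
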
@@ -923,7 +923,7 @@ int __genwqe_execute_raw_ddcb(struct genwqe_dev *cd,
 	}
 	if (cmd->asv_length > DDCB_ASV_LENGTH) {
 		dev_err(&pci_dev->dev, "[%s] err: wrong asv_length of %d\n",
-			__func__, cmd->asiv_length);
+			__func__, cmd->asv_length);
 		return -EINVAL;
 	}
 	rc = __genwqe_enqueue_ddcb(cd, req, f_flags);

@@ -1858,7 +1858,7 @@ atmel_nand_controller_legacy_add_nands(struct atmel_nand_controller *nc)
 
 static int atmel_nand_controller_add_nands(struct atmel_nand_controller *nc)
 {
-	struct device_node *np, *nand_np;
+	struct device_node *np;
 	struct device *dev = nc->dev;
 	int ret, reg_cells;
 	u32 val;
@@ -1885,7 +1885,7 @@ static int atmel_nand_controller_add_nands(struct atmel_nand_controller *nc)
 
 	reg_cells += val;
 
-	for_each_child_of_node(np, nand_np) {
+	for_each_child_of_node_scoped(np, nand_np) {
 		struct atmel_nand *nand;
 
 		nand = atmel_nand_create(nc, nand_np, reg_cells);

@@ -752,7 +752,10 @@ static u32 ena_get_rxfh_indir_size(struct net_device *netdev)
 
 static u32 ena_get_rxfh_key_size(struct net_device *netdev)
 {
-	return ENA_HASH_KEY_SIZE;
+	struct ena_adapter *adapter = netdev_priv(netdev);
+	struct ena_rss *rss = &adapter->ena_dev->rss;
+
+	return rss->hash_key ? ENA_HASH_KEY_SIZE : 0;
 }
 
 static int ena_indirection_table_set(struct ena_adapter *adapter,

@@ -954,15 +954,18 @@ receive_packet (struct net_device *dev)
 		} else {
 			struct sk_buff *skb;
 
+			skb = NULL;
 			/* Small skbuffs for short packets */
-			if (pkt_len > copy_thresh) {
+			if (pkt_len <= copy_thresh)
+				skb = netdev_alloc_skb_ip_align(dev, pkt_len);
+			if (!skb) {
 				dma_unmap_single(&np->pdev->dev,
 						 desc_to_dma(desc),
 						 np->rx_buf_sz,
 						 DMA_FROM_DEVICE);
 				skb_put (skb = np->rx_skbuff[entry], pkt_len);
 				np->rx_skbuff[entry] = NULL;
-			} else if ((skb = netdev_alloc_skb_ip_align(dev, pkt_len))) {
+			} else {
 				dma_sync_single_for_cpu(&np->pdev->dev,
 							desc_to_dma(desc),
 							np->rx_buf_sz,

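The restructured receive path is the classic rx "copybreak", rearranged so the copy attempt comes first and the big-buffer handoff becomes the fallback for long frames or failed allocations. A standalone sketch of the decision, with invented buffer types instead of skbs:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define COPY_THRESH 256

struct buf { char *data; size_t len; };

static struct buf *receive(struct buf *rx_slot, size_t pkt_len)
{
	struct buf *skb = NULL;

	/* small frame: try to copy into a fresh small buffer */
	if (pkt_len <= COPY_THRESH) {
		skb = malloc(sizeof(*skb));
		if (skb && !(skb->data = malloc(pkt_len))) {
			free(skb);
			skb = NULL;
		}
		if (skb) {
			memcpy(skb->data, rx_slot->data, pkt_len);
			skb->len = pkt_len;
		}
	}
	/* long frame, or allocation failed: hand up the rx buffer itself */
	if (!skb) {
		skb = rx_slot;          /* caller must refill this slot */
		skb->len = pkt_len;
	}
	return skb;
}

int main(void)
{
	static char payload[2048];
	struct buf slot = { payload, sizeof(payload) };

	printf("small frame copied out: %d\n", receive(&slot, 128) != &slot);
	printf("large frame handed up:  %d\n", receive(&slot, 1500) == &slot);
	return 0;
}
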
@@ -289,6 +289,10 @@ static void poll_timeout(struct mlx5_cmd_work_ent *ent)
 			return;
 		}
 		cond_resched();
+		if (mlx5_cmd_is_down(dev)) {
+			ent->ret = -ENXIO;
+			return;
+		}
 	} while (time_before(jiffies, poll_end));
 
 	ent->ret = -ETIMEDOUT;
@@ -1059,7 +1063,7 @@ static void cmd_work_handler(struct work_struct *work)
 		poll_timeout(ent);
 		/* make sure we read the descriptor after ownership is SW */
 		rmb();
-		mlx5_cmd_comp_handler(dev, 1ULL << ent->idx, (ent->ret == -ETIMEDOUT));
+		mlx5_cmd_comp_handler(dev, 1ULL << ent->idx, !!ent->ret);
 	}
 }
 

@@ -66,23 +66,11 @@ struct mlx5e_port_buffer {
 	struct mlx5e_bufferx_reg  buffer[MLX5E_MAX_NETWORK_BUFFER];
 };
 
-#ifdef CONFIG_MLX5_CORE_EN_DCB
 int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
 				    u32 change, unsigned int mtu,
 				    struct ieee_pfc *pfc,
 				    u32 *buffer_size,
 				    u8 *prio2buffer);
-#else
-static inline int
-mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
-				u32 change, unsigned int mtu,
-				void *pfc,
-				u32 *buffer_size,
-				u8 *prio2buffer)
-{
-	return 0;
-}
-#endif
 
 int mlx5e_port_query_buffer(struct mlx5e_priv *priv,
 			    struct mlx5e_port_buffer *port_buffer);

@@ -44,7 +44,7 @@
 #include "eswitch.h"
 #include "en.h"
 #include "en/txrx.h"
-#include "en/port_buffer.h"
 #include "en_tc.h"
 #include "en_rep.h"
 #include "en_accel/ipsec.h"
@@ -2723,11 +2722,9 @@ int mlx5e_set_dev_port_mtu(struct mlx5e_priv *priv)
 	struct mlx5e_params *params = &priv->channels.params;
 	struct net_device *netdev = priv->netdev;
 	struct mlx5_core_dev *mdev = priv->mdev;
-	u16 mtu, prev_mtu;
+	u16 mtu;
 	int err;
 
-	mlx5e_query_mtu(mdev, params, &prev_mtu);
-
 	err = mlx5e_set_mtu(mdev, params, params->sw_mtu);
 	if (err)
 		return err;
@@ -2737,18 +2734,6 @@ int mlx5e_set_dev_port_mtu(struct mlx5e_priv *priv)
 		netdev_warn(netdev, "%s: VPort MTU %d is different than netdev mtu %d\n",
 			    __func__, mtu, params->sw_mtu);
 
-	if (mtu != prev_mtu && MLX5_BUFFER_SUPPORTED(mdev)) {
-		err = mlx5e_port_manual_buffer_config(priv, 0, mtu,
-						      NULL, NULL, NULL);
-		if (err) {
-			netdev_warn(netdev, "%s: Failed to set Xon/Xoff values with MTU %d (err %d), setting back to previous MTU %d\n",
-				    __func__, mtu, err, prev_mtu);
-
-			mlx5e_set_mtu(mdev, params, prev_mtu);
-			return err;
-		}
-	}
-
 	params->sw_mtu = mtu;
 	return 0;
 }

@@ -27,6 +27,7 @@ struct mlx5_fw_reset {
 	struct work_struct reset_reload_work;
 	struct work_struct reset_now_work;
 	struct work_struct reset_abort_work;
+	struct delayed_work reset_timeout_work;
 	unsigned long reset_flags;
 	u8 reset_method;
 	struct timer_list timer;
@@ -257,6 +258,8 @@ static int mlx5_sync_reset_clear_reset_requested(struct mlx5_core_dev *dev, bool
 		return -EALREADY;
 	}
 
+	if (current_work() != &fw_reset->reset_timeout_work.work)
+		cancel_delayed_work(&fw_reset->reset_timeout_work);
 	mlx5_stop_sync_reset_poll(dev);
 	if (poll_health)
 		mlx5_start_health_poll(dev);
@@ -327,6 +330,11 @@ static int mlx5_sync_reset_set_reset_requested(struct mlx5_core_dev *dev)
 	}
 	mlx5_stop_health_poll(dev, true);
 	mlx5_start_sync_reset_poll(dev);
+
+	if (!test_bit(MLX5_FW_RESET_FLAGS_DROP_NEW_REQUESTS,
+		      &fw_reset->reset_flags))
+		schedule_delayed_work(&fw_reset->reset_timeout_work,
+				      msecs_to_jiffies(mlx5_tout_ms(dev, PCI_SYNC_UPDATE)));
 	return 0;
 }
 
@@ -700,6 +708,19 @@ static void mlx5_sync_reset_events_handle(struct mlx5_fw_reset *fw_reset, struct
 	}
 }
 
+static void mlx5_sync_reset_timeout_work(struct work_struct *work)
+{
+	struct delayed_work *dwork = container_of(work, struct delayed_work,
+						  work);
+	struct mlx5_fw_reset *fw_reset =
+		container_of(dwork, struct mlx5_fw_reset, reset_timeout_work);
+	struct mlx5_core_dev *dev = fw_reset->dev;
+
+	if (mlx5_sync_reset_clear_reset_requested(dev, true))
+		return;
+	mlx5_core_warn(dev, "PCI Sync FW Update Reset Timeout.\n");
+}
+
 static int fw_reset_event_notifier(struct notifier_block *nb, unsigned long action, void *data)
 {
 	struct mlx5_fw_reset *fw_reset = mlx5_nb_cof(nb, struct mlx5_fw_reset, nb);
@@ -783,6 +804,7 @@ void mlx5_drain_fw_reset(struct mlx5_core_dev *dev)
 	cancel_work_sync(&fw_reset->reset_reload_work);
 	cancel_work_sync(&fw_reset->reset_now_work);
 	cancel_work_sync(&fw_reset->reset_abort_work);
+	cancel_delayed_work(&fw_reset->reset_timeout_work);
 }
 
 static const struct devlink_param mlx5_fw_reset_devlink_params[] = {
@@ -826,6 +848,8 @@ int mlx5_fw_reset_init(struct mlx5_core_dev *dev)
 	INIT_WORK(&fw_reset->reset_reload_work, mlx5_sync_reset_reload_work);
 	INIT_WORK(&fw_reset->reset_now_work, mlx5_sync_reset_now_event);
 	INIT_WORK(&fw_reset->reset_abort_work, mlx5_sync_reset_abort_event);
+	INIT_DELAYED_WORK(&fw_reset->reset_timeout_work,
+			  mlx5_sync_reset_timeout_work);
 
 	init_completion(&fw_reset->done);
 	return 0;

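The new timeout handler has to climb two levels of containment: the work_struct sits inside a delayed_work, which in turn sits inside the mlx5_fw_reset object (hence also the current_work() guard so the work item does not try to cancel itself). The double container_of() hop, as a compilable standalone sketch with simplified stand-in types:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct work_struct { int pending; };
struct delayed_work { struct work_struct work; long expires; };

struct fw_reset {
	int id;
	struct delayed_work reset_timeout_work;
};

static void timeout_handler(struct work_struct *work)
{
	/* first hop: plain work -> enclosing delayed work */
	struct delayed_work *dwork = container_of(work, struct delayed_work, work);
	/* second hop: delayed work -> enclosing driver object */
	struct fw_reset *fr = container_of(dwork, struct fw_reset, reset_timeout_work);

	printf("timeout for fw_reset %d\n", fr->id);
}

int main(void)
{
	struct fw_reset fr = { .id = 7 };

	timeout_handler(&fr.reset_timeout_work.work);
	return 0;
}
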
@@ -489,9 +489,12 @@ static int reclaim_pages_cmd(struct mlx5_core_dev *dev,
 	u32 func_id;
 	u32 npages;
 	u32 i = 0;
+	int err;
 
-	if (!mlx5_cmd_is_down(dev))
-		return mlx5_cmd_do(dev, in, in_size, out, out_size);
+	err = mlx5_cmd_do(dev, in, in_size, out, out_size);
+	/* If FW is gone (-ENXIO), proceed to forceful reclaim */
+	if (err != -ENXIO)
+		return err;
 
 	/* No hard feelings, we want our pages back! */
 	npages = MLX5_GET(manage_pages_in, in, input_num_entries);

@@ -1413,7 +1413,7 @@ static u32 nfp_net_get_rxfh_key_size(struct net_device *netdev)
 	struct nfp_net *nn = netdev_priv(netdev);
 
 	if (!(nn->cap & NFP_NET_CFG_CTRL_RSS_ANY))
-		return -EOPNOTSUPP;
+		return 0;
 
 	return nfp_net_rss_key_sz(nn);
 }

@@ -625,6 +625,21 @@ static void ax88772_suspend(struct usbnet *dev)
 		   asix_read_medium_status(dev, 1));
 }
 
+/* Notes on PM callbacks and locking context:
+ *
+ * - asix_suspend()/asix_resume() are invoked for both runtime PM and
+ *   system-wide suspend/resume. For struct usb_driver the ->resume()
+ *   callback does not receive pm_message_t, so the resume type cannot
+ *   be distinguished here.
+ *
+ * - The MAC driver must hold RTNL when calling phylink interfaces such as
+ *   phylink_suspend()/resume(). Those calls will also perform MDIO I/O.
+ *
+ * - Taking RTNL and doing MDIO from a runtime-PM resume callback (while
+ *   the USB PM lock is held) is fragile. Since autosuspend brings no
+ *   measurable power saving here, we block it by holding a PM usage
+ *   reference in ax88772_bind().
+ */
 static int asix_suspend(struct usb_interface *intf, pm_message_t message)
 {
 	struct usbnet *dev = usb_get_intfdata(intf);
@@ -919,6 +934,13 @@ static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf)
 	if (ret)
 		goto initphy_err;
 
+	/* Keep this interface runtime-PM active by taking a usage ref.
+	 * Prevents runtime suspend while bound and avoids resume paths
+	 * that could deadlock (autoresume under RTNL while USB PM lock
+	 * is held, phylink/MDIO wants RTNL).
+	 */
+	pm_runtime_get_noresume(&intf->dev);
+
 	return 0;
 
 initphy_err:
@@ -948,6 +970,8 @@ static void ax88772_unbind(struct usbnet *dev, struct usb_interface *intf)
 	phylink_destroy(priv->phylink);
 	ax88772_mdio_unregister(priv);
 	asix_rx_fixup_common_free(dev->driver_priv);
+	/* Drop the PM usage ref taken in bind() */
+	pm_runtime_put(&intf->dev);
 }
 
 static void ax88178_unbind(struct usbnet *dev, struct usb_interface *intf)
@@ -1600,6 +1624,11 @@ static struct usb_driver asix_driver = {
 	.resume = asix_resume,
 	.reset_resume = asix_resume,
 	.disconnect = usbnet_disconnect,
+	/* usbnet enables autosuspend by default (supports_autosuspend=1).
+	 * We keep runtime-PM active for AX88772* by taking a PM usage
+	 * reference in ax88772_bind() (pm_runtime_get_noresume()) and
+	 * dropping it in unbind(), which effectively blocks autosuspend.
+	 */
 	.supports_autosuspend = 1,
 	.disable_hub_initiated_lpm = 1,
 };

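The fix pins the interface active for the whole bound lifetime by holding a runtime-PM usage reference, rather than fighting the RTNL/USB-PM lock ordering in the resume path. A toy model of the usage-count semantics being relied on, not the real pm_runtime API:

#include <stdio.h>

struct dev_pm { int usage_count; };

static void pm_get_noresume(struct dev_pm *pm) { pm->usage_count++; }
static void pm_put(struct dev_pm *pm)          { pm->usage_count--; }

static const char *autosuspend_check(const struct dev_pm *pm)
{
	/* the device may autosuspend only while the usage count is zero */
	return pm->usage_count > 0 ? "blocked" : "allowed";
}

int main(void)
{
	struct dev_pm pm = { 0 };

	pm_get_noresume(&pm);                    /* bind() */
	printf("while bound: autosuspend %s\n", autosuspend_check(&pm));
	pm_put(&pm);                             /* unbind() */
	printf("after unbind: autosuspend %s\n", autosuspend_check(&pm));
	return 0;
}
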
@@ -664,7 +664,6 @@ static void rtl8150_set_multicast(struct net_device *netdev)
 	rtl8150_t *dev = netdev_priv(netdev);
 	u16 rx_creg = 0x9e;
 
-	netif_stop_queue(netdev);
 	if (netdev->flags & IFF_PROMISC) {
 		rx_creg |= 0x0001;
 		dev_info(&netdev->dev, "%s: promiscuous mode\n", netdev->name);
@@ -678,7 +677,6 @@ static void rtl8150_set_multicast(struct net_device *netdev)
 		rx_creg &= 0x00fc;
 	}
 	async_set_registers(dev, RCR, sizeof(rx_creg), rx_creg);
-	netif_wake_queue(netdev);
 }
 
 static netdev_tx_t rtl8150_start_xmit(struct sk_buff *skb,

@@ -1763,33 +1763,32 @@ void ath10k_wmi_put_wmi_channel(struct ath10k *ar, struct wmi_channel *ch,
 
 int ath10k_wmi_wait_for_service_ready(struct ath10k *ar)
 {
+	unsigned long timeout = jiffies + WMI_SERVICE_READY_TIMEOUT_HZ;
 	unsigned long time_left, i;
 
-	time_left = wait_for_completion_timeout(&ar->wmi.service_ready,
-						WMI_SERVICE_READY_TIMEOUT_HZ);
-	if (!time_left) {
-		/* Sometimes the PCI HIF doesn't receive interrupt
-		 * for the service ready message even if the buffer
-		 * was completed. PCIe sniffer shows that it's
-		 * because the corresponding CE ring doesn't fire
-		 * it. Workaround here by polling CE rings once.
-		 */
-		ath10k_warn(ar, "failed to receive service ready completion, polling..\n");
-
+	/* Sometimes the PCI HIF doesn't receive interrupt
+	 * for the service ready message even if the buffer
+	 * was completed. PCIe sniffer shows that it's
+	 * because the corresponding CE ring doesn't fire
+	 * it. Workaround here by polling CE rings. Since
+	 * the message could arrive at any time, continue
+	 * polling until timeout.
+	 */
+	do {
 		for (i = 0; i < CE_COUNT; i++)
 			ath10k_hif_send_complete_check(ar, i, 1);
 
+		/* The 100 ms granularity is a tradeoff considering scheduler
+		 * overhead and response latency
+		 */
 		time_left = wait_for_completion_timeout(&ar->wmi.service_ready,
-							WMI_SERVICE_READY_TIMEOUT_HZ);
-		if (!time_left) {
-			ath10k_warn(ar, "polling timed out\n");
-			return -ETIMEDOUT;
-		}
-
-		ath10k_warn(ar, "service ready completion received, continuing normally\n");
-	}
+							msecs_to_jiffies(100));
+		if (time_left)
+			return 0;
+	} while (time_before(jiffies, timeout));
 
-	return 0;
+	ath10k_warn(ar, "failed to receive service ready completion\n");
+	return -ETIMEDOUT;
 }
 
 int ath10k_wmi_wait_for_unified_ready(struct ath10k *ar)

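The rewrite replaces one long wait plus a single CE-ring poke with a bounded loop of short (~100 ms) waits, poking the rings on every pass so an event that lands at any point within the budget is caught. A standalone sketch of that loop shape; the timing primitives are simplified stand-ins for the kernel's jiffies-based helpers.

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static bool hw_event_arrived(int iteration)
{
	return iteration == 3;          /* pretend the event lands late */
}

static int wait_for_ready(void)
{
	time_t deadline = time(NULL) + 3;   /* overall budget */
	int iteration = 0;

	do {
		/* poke completion rings, then wait one short slice (~100 ms) */
		struct timespec slice = { 0, 100 * 1000 * 1000 };
		nanosleep(&slice, NULL);
		if (hw_event_arrived(iteration++))
			return 0;
	} while (time(NULL) < deadline);

	return -1;                      /* timed out */
}

int main(void)
{
	printf("wait_for_ready() = %d\n", wait_for_ready());
	return 0;
}
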
@@ -659,10 +659,9 @@ static void mwifiex_reg_notifier(struct wiphy *wiphy,
 		return;
 	}
 
-	/* Don't send world or same regdom info to firmware */
-	if (strncmp(request->alpha2, "00", 2) &&
-	    strncmp(request->alpha2, adapter->country_code,
-		    sizeof(request->alpha2))) {
+	/* Don't send same regdom info to firmware */
+	if (strncmp(request->alpha2, adapter->country_code,
+		    sizeof(request->alpha2)) != 0) {
 		memcpy(adapter->country_code, request->alpha2,
 		       sizeof(request->alpha2));
 		mwifiex_send_domain_info_cmd_fw(wiphy);

@@ -48,7 +48,7 @@ mt76_wmac_probe(struct platform_device *pdev)
 
 	return 0;
 error:
-	ieee80211_free_hw(mt76_hw(dev));
+	mt76_free_device(mdev);
 	return ret;
 }
 

@@ -207,7 +207,6 @@ static void rtw89_ser_hdl_work(struct work_struct *work)
 
 static int ser_send_msg(struct rtw89_ser *ser, u8 event)
 {
-	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);
 	struct ser_msg *msg = NULL;
 
 	if (test_bit(RTW89_SER_DRV_STOP_RUN, ser->flags))
@@ -223,7 +222,7 @@ static int ser_send_msg(struct rtw89_ser *ser, u8 event)
 	list_add(&msg->list, &ser->msg_q);
 	spin_unlock_irq(&ser->msg_q_lock);
 
-	ieee80211_queue_work(rtwdev->hw, &ser->ser_hdl_work);
+	schedule_work(&ser->ser_hdl_work);
 	return 0;
 }
 

Some files were not shown because too many files have changed in this diff.