Mirror of git://git.yoctoproject.org/linux-yocto.git
Synced 2025-07-17 02:59:58 +02:00
This is the 6.6.93 stable release

Merge tag 'v6.6.93' into v6.6/standard/base

This is the 6.6.93 stable release

# gpg: Signature made Wed 04 Jun 2025 08:42:35 AM EDT
# gpg: using RSA key 647F28654894E3BD457199BE38DBBDC86092693E
# gpg: Can't check signature: No public key
This commit is contained in commit dbc2820565.
@@ -270,6 +270,12 @@ Description: Shows the operation capability bits displayed in bitmap format
correlates to the operations allowed. It's visible only
on platforms that support the capability.

What: /sys/bus/dsa/devices/wq<m>.<n>/driver_name
Date: Sept 8, 2023
KernelVersion: 6.7.0
Contact: dmaengine@vger.kernel.org
Description: Name of driver to be bounded to the wq.

What: /sys/bus/dsa/devices/engine<m>.<n>/group_id
Date: Oct 25, 2019
KernelVersion: 5.6.0
@@ -5978,6 +5978,8 @@

Selecting 'on' will also enable the mitigation
against user space to user space task attacks.
Selecting specific mitigation does not force enable
user mitigations.

Selecting 'off' will disable both the kernel and
the user space protections.
@@ -103,4 +103,4 @@ Some helpers are provided in order to set/get modem control lines via GPIO.
.. kernel-doc:: drivers/tty/serial/serial_mctrl_gpio.c
:identifiers: mctrl_gpio_init mctrl_gpio_free mctrl_gpio_to_gpiod
mctrl_gpio_set mctrl_gpio_get mctrl_gpio_enable_ms
mctrl_gpio_disable_ms
mctrl_gpio_disable_ms_sync mctrl_gpio_disable_ms_no_sync
@@ -32,12 +32,12 @@ Temperature sensors and fans can be queried and set via the standard
=============================== ======= =======================================
Name                            Perm    Description
=============================== ======= =======================================
fan[1-3]_input                  RO      Fan speed in RPM.
fan[1-3]_label                  RO      Fan label.
fan[1-3]_min                    RO      Minimal Fan speed in RPM
fan[1-3]_max                    RO      Maximal Fan speed in RPM
fan[1-3]_target                 RO      Expected Fan speed in RPM
pwm[1-3]                        RW      Control the fan PWM duty-cycle.
fan[1-4]_input                  RO      Fan speed in RPM.
fan[1-4]_label                  RO      Fan label.
fan[1-4]_min                    RO      Minimal Fan speed in RPM
fan[1-4]_max                    RO      Maximal Fan speed in RPM
fan[1-4]_target                 RO      Expected Fan speed in RPM
pwm[1-4]                        RW      Control the fan PWM duty-cycle.
pwm1_enable                     WO      Enable or disable automatic BIOS fan
                                        control (not supported on all laptops,
                                        see below for details).

@@ -93,7 +93,7 @@ Again, when you find new codes, we'd be happy to have your patches!
---------------------------

The driver also exports the fans as thermal cooling devices with
``type`` set to ``dell-smm-fan[1-3]``. This allows for easy fan control
``type`` set to ``dell-smm-fan[1-4]``. This allows for easy fan control
using one of the thermal governors.

Module parameters
Makefile

@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 6
PATCHLEVEL = 6
SUBLEVEL = 92
SUBLEVEL = 93
EXTRAVERSION =
NAME = Pinguïn Aangedreven
@@ -139,7 +139,7 @@
reg = <0x54400000 0x00040000>;
clocks = <&tegra_car TEGRA114_CLK_DSIB>,
<&tegra_car TEGRA114_CLK_DSIBLP>,
<&tegra_car TEGRA114_CLK_PLL_D2_OUT0>;
<&tegra_car TEGRA114_CLK_PLL_D_OUT0>;
clock-names = "dsi", "lp", "parent";
resets = <&tegra_car 82>;
reset-names = "dsi";
@@ -538,11 +538,12 @@ extern u32 at91_pm_suspend_in_sram_sz;

static int at91_suspend_finish(unsigned long val)
{
unsigned char modified_gray_code[] = {
0x00, 0x01, 0x02, 0x03, 0x06, 0x07, 0x04, 0x05, 0x0c, 0x0d,
0x0e, 0x0f, 0x0a, 0x0b, 0x08, 0x09, 0x18, 0x19, 0x1a, 0x1b,
0x1e, 0x1f, 0x1c, 0x1d, 0x14, 0x15, 0x16, 0x17, 0x12, 0x13,
0x10, 0x11,
/* SYNOPSYS workaround to fix a bug in the calibration logic */
unsigned char modified_fix_code[] = {
0x00, 0x01, 0x01, 0x06, 0x07, 0x0c, 0x06, 0x07, 0x0b, 0x18,
0x0a, 0x0b, 0x0c, 0x0d, 0x0d, 0x0a, 0x13, 0x13, 0x12, 0x13,
0x14, 0x15, 0x15, 0x12, 0x18, 0x19, 0x19, 0x1e, 0x1f, 0x14,
0x1e, 0x1f,
};
unsigned int tmp, index;
int i;

@@ -553,25 +554,25 @@ static int at91_suspend_finish(unsigned long val)
* restore the ZQ0SR0 with the value saved here. But the
* calibration is buggy and restoring some values from ZQ0SR0
* is forbidden and risky thus we need to provide processed
* values for these (modified gray code values).
* values for these.
*/
tmp = readl(soc_pm.data.ramc_phy + DDR3PHY_ZQ0SR0);

/* Store pull-down output impedance select. */
index = (tmp >> DDR3PHY_ZQ0SR0_PDO_OFF) & 0x1f;
soc_pm.bu->ddr_phy_calibration[0] = modified_gray_code[index];
soc_pm.bu->ddr_phy_calibration[0] = modified_fix_code[index] << DDR3PHY_ZQ0SR0_PDO_OFF;

/* Store pull-up output impedance select. */
index = (tmp >> DDR3PHY_ZQ0SR0_PUO_OFF) & 0x1f;
soc_pm.bu->ddr_phy_calibration[0] |= modified_gray_code[index];
soc_pm.bu->ddr_phy_calibration[0] |= modified_fix_code[index] << DDR3PHY_ZQ0SR0_PUO_OFF;

/* Store pull-down on-die termination impedance select. */
index = (tmp >> DDR3PHY_ZQ0SR0_PDODT_OFF) & 0x1f;
soc_pm.bu->ddr_phy_calibration[0] |= modified_gray_code[index];
soc_pm.bu->ddr_phy_calibration[0] |= modified_fix_code[index] << DDR3PHY_ZQ0SR0_PDODT_OFF;

/* Store pull-up on-die termination impedance select. */
index = (tmp >> DDR3PHY_ZQ0SRO_PUODT_OFF) & 0x1f;
soc_pm.bu->ddr_phy_calibration[0] |= modified_gray_code[index];
soc_pm.bu->ddr_phy_calibration[0] |= modified_fix_code[index] << DDR3PHY_ZQ0SRO_PUODT_OFF;

/*
* The 1st 8 words of memory might get corrupted in the process
@@ -151,28 +151,12 @@
vcc-pg-supply = <&reg_aldo1>;
};

&r_ir {
linux,rc-map-name = "rc-beelink-gs1";
status = "okay";
};

&r_pio {
/*
* FIXME: We can't add that supply for now since it would
* create a circular dependency between pinctrl, the regulator
* and the RSB Bus.
*
* vcc-pl-supply = <&reg_aldo1>;
*/
vcc-pm-supply = <&reg_aldo1>;
};

&r_rsb {
&r_i2c {
status = "okay";

axp805: pmic@745 {
axp805: pmic@36 {
compatible = "x-powers,axp805", "x-powers,axp806";
reg = <0x745>;
reg = <0x36>;
interrupt-parent = <&r_intc>;
interrupts = <GIC_SPI 96 IRQ_TYPE_LEVEL_LOW>;
interrupt-controller;

@@ -290,6 +274,22 @@
};
};

&r_ir {
linux,rc-map-name = "rc-beelink-gs1";
status = "okay";
};

&r_pio {
/*
* PL0 and PL1 are used for PMIC I2C
* don't enable the pl-supply else
* it will fail at boot
*
* vcc-pl-supply = <&reg_aldo1>;
*/
vcc-pm-supply = <&reg_aldo1>;
};

&spdif {
pinctrl-names = "default";
pinctrl-0 = <&spdif_tx_pin>;
@@ -175,16 +175,12 @@
vcc-pg-supply = <&reg_vcc_wifi_io>;
};

&r_ir {
status = "okay";
};

&r_rsb {
&r_i2c {
status = "okay";

axp805: pmic@745 {
axp805: pmic@36 {
compatible = "x-powers,axp805", "x-powers,axp806";
reg = <0x745>;
reg = <0x36>;
interrupt-parent = <&r_intc>;
interrupts = <GIC_SPI 96 IRQ_TYPE_LEVEL_LOW>;
interrupt-controller;

@@ -295,6 +291,10 @@
};
};

&r_ir {
status = "okay";
};

&rtc {
clocks = <&ext_osc32k>;
};
@@ -112,20 +112,12 @@
vcc-pg-supply = <&reg_aldo1>;
};

&r_ir {
status = "okay";
};

&r_pio {
vcc-pm-supply = <&reg_bldo3>;
};

&r_rsb {
&r_i2c {
status = "okay";

axp805: pmic@745 {
axp805: pmic@36 {
compatible = "x-powers,axp805", "x-powers,axp806";
reg = <0x745>;
reg = <0x36>;
interrupt-parent = <&r_intc>;
interrupts = <GIC_SPI 96 IRQ_TYPE_LEVEL_LOW>;
interrupt-controller;

@@ -240,6 +232,14 @@
};
};

&r_ir {
status = "okay";
};

&r_pio {
vcc-pm-supply = <&reg_bldo3>;
};

&rtc {
clocks = <&ext_osc32k>;
};
@@ -26,6 +26,8 @@

leds {
compatible = "gpio-leds";
pinctrl-names = "default";
pinctrl-0 = <&spi_quad_pins>;

led-power1 {
label = "udpu:green:power";

@@ -82,8 +84,6 @@

&spi0 {
status = "okay";
pinctrl-names = "default";
pinctrl-0 = <&spi_quad_pins>;

flash@0 {
compatible = "jedec,spi-nor";

@@ -108,6 +108,10 @@
};
};

&spi_quad_pins {
function = "gpio";
};

&pinctrl_nb {
i2c2_recovery_pins: i2c2-recovery-pins {
groups = "i2c2";
@@ -1635,7 +1635,7 @@
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
regulator-always-on;
gpio = <&exp1 14 GPIO_ACTIVE_HIGH>;
gpio = <&exp1 9 GPIO_ACTIVE_HIGH>;
enable-active-high;
vin-supply = <&vdd_1v8>;
};
@@ -102,6 +102,16 @@
};

pcie@141a0000 {
reg = <0x00 0x141a0000 0x0 0x00020000 /* appl registers (128K) */
0x00 0x3a000000 0x0 0x00040000 /* configuration space (256K) */
0x00 0x3a040000 0x0 0x00040000 /* iATU_DMA reg space (256K) */
0x00 0x3a080000 0x0 0x00040000 /* DBI reg space (256K) */
0x2e 0x20000000 0x0 0x10000000>; /* ECAM (256MB) */

ranges = <0x81000000 0x00 0x3a100000 0x00 0x3a100000 0x0 0x00100000 /* downstream I/O (1MB) */
0x82000000 0x00 0x40000000 0x2e 0x30000000 0x0 0x08000000 /* non-prefetchable memory (128MB) */
0xc3000000 0x28 0x00000000 0x28 0x00000000 0x6 0x20000000>; /* prefetchable memory (25088MB) */

status = "okay";
vddio-pex-ctl-supply = <&vdd_1v8_ls>;
phys = <&p2u_nvhs_0>, <&p2u_nvhs_1>, <&p2u_nvhs_2>,
@@ -231,6 +231,8 @@
interrupts = <GIC_SPI 207 IRQ_TYPE_LEVEL_HIGH>;
#dma-cells = <1>;
qcom,ee = <1>;
qcom,num-ees = <4>;
num-channels = <16>;
qcom,controlled-remotely;
};
@@ -442,7 +442,7 @@
no-map;
};

pil_camera_mem: mmeory@85200000 {
pil_camera_mem: memory@85200000 {
reg = <0x0 0x85200000 0x0 0x500000>;
no-map;
};
@@ -4233,6 +4233,8 @@
interrupts = <GIC_SPI 272 IRQ_TYPE_LEVEL_HIGH>;
#dma-cells = <1>;
qcom,ee = <0>;
qcom,num-ees = <4>;
num-channels = <16>;
qcom,controlled-remotely;
iommus = <&apps_smmu 0x584 0x11>,
<&apps_smmu 0x588 0x0>,
@@ -1866,6 +1866,8 @@
interrupts = <GIC_SPI 272 IRQ_TYPE_LEVEL_HIGH>;
#dma-cells = <1>;
qcom,ee = <0>;
qcom,num-ees = <4>;
num-channels = <20>;
qcom,controlled-remotely;
iommus = <&apps_smmu 0x480 0x0>,
<&apps_smmu 0x481 0x0>;
@@ -43,6 +43,17 @@
regulator-boot-on;
};

vsys_5v0: regulator-vsys5v0 {
/* Output of LM61460 */
compatible = "regulator-fixed";
regulator-name = "vsys_5v0";
regulator-min-microvolt = <5000000>;
regulator-max-microvolt = <5000000>;
vin-supply = <&vusb_main>;
regulator-always-on;
regulator-boot-on;
};

vsys_3v3: regulator-vsys3v3 {
/* Output of LM5141 */
compatible = "regulator-fixed";

@@ -75,7 +86,7 @@
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <3300000>;
regulator-boot-on;
vin-supply = <&vsys_3v3>;
vin-supply = <&vsys_5v0>;
gpios = <&main_gpio0 49 GPIO_ACTIVE_HIGH>;
states = <1800000 0x0>,
<3300000 0x1>;
@ -10,39 +10,44 @@
|
|||
|
||||
#include <dt-bindings/clock/xlnx-zynqmp-clk.h>
|
||||
/ {
|
||||
pss_ref_clk: pss_ref_clk {
|
||||
pss_ref_clk: pss-ref-clk {
|
||||
bootph-all;
|
||||
compatible = "fixed-clock";
|
||||
#clock-cells = <0>;
|
||||
clock-frequency = <33333333>;
|
||||
clock-output-names = "pss_ref_clk";
|
||||
};
|
||||
|
||||
video_clk: video_clk {
|
||||
video_clk: video-clk {
|
||||
bootph-all;
|
||||
compatible = "fixed-clock";
|
||||
#clock-cells = <0>;
|
||||
clock-frequency = <27000000>;
|
||||
clock-output-names = "video_clk";
|
||||
};
|
||||
|
||||
pss_alt_ref_clk: pss_alt_ref_clk {
|
||||
pss_alt_ref_clk: pss-alt-ref-clk {
|
||||
bootph-all;
|
||||
compatible = "fixed-clock";
|
||||
#clock-cells = <0>;
|
||||
clock-frequency = <0>;
|
||||
clock-output-names = "pss_alt_ref_clk";
|
||||
};
|
||||
|
||||
gt_crx_ref_clk: gt_crx_ref_clk {
|
||||
gt_crx_ref_clk: gt-crx-ref-clk {
|
||||
bootph-all;
|
||||
compatible = "fixed-clock";
|
||||
#clock-cells = <0>;
|
||||
clock-frequency = <108000000>;
|
||||
clock-output-names = "gt_crx_ref_clk";
|
||||
};
|
||||
|
||||
aux_ref_clk: aux_ref_clk {
|
||||
aux_ref_clk: aux-ref-clk {
|
||||
bootph-all;
|
||||
compatible = "fixed-clock";
|
||||
#clock-cells = <0>;
|
||||
clock-frequency = <27000000>;
|
||||
clock-output-names = "aux_ref_clk";
|
||||
};
|
||||
};
|
||||
|
||||
|
|
|
@ -132,6 +132,7 @@
|
|||
#define FUJITSU_CPU_PART_A64FX 0x001
|
||||
|
||||
#define HISI_CPU_PART_TSV110 0xD01
|
||||
#define HISI_CPU_PART_HIP09 0xD02
|
||||
|
||||
#define APPLE_CPU_PART_M1_ICESTORM 0x022
|
||||
#define APPLE_CPU_PART_M1_FIRESTORM 0x023
|
||||
|
@ -208,6 +209,7 @@
|
|||
#define MIDR_NVIDIA_CARMEL MIDR_CPU_MODEL(ARM_CPU_IMP_NVIDIA, NVIDIA_CPU_PART_CARMEL)
|
||||
#define MIDR_FUJITSU_A64FX MIDR_CPU_MODEL(ARM_CPU_IMP_FUJITSU, FUJITSU_CPU_PART_A64FX)
|
||||
#define MIDR_HISI_TSV110 MIDR_CPU_MODEL(ARM_CPU_IMP_HISI, HISI_CPU_PART_TSV110)
|
||||
#define MIDR_HISI_HIP09 MIDR_CPU_MODEL(ARM_CPU_IMP_HISI, HISI_CPU_PART_HIP09)
|
||||
#define MIDR_APPLE_M1_ICESTORM MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M1_ICESTORM)
|
||||
#define MIDR_APPLE_M1_FIRESTORM MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M1_FIRESTORM)
|
||||
#define MIDR_APPLE_M1_ICESTORM_PRO MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M1_ICESTORM_PRO)
|
||||
|
|
|
@ -679,7 +679,8 @@ static inline unsigned long pmd_page_vaddr(pmd_t pmd)
|
|||
pr_err("%s:%d: bad pmd %016llx.\n", __FILE__, __LINE__, pmd_val(e))
|
||||
|
||||
#define pud_none(pud) (!pud_val(pud))
|
||||
#define pud_bad(pud) (!pud_table(pud))
|
||||
#define pud_bad(pud) ((pud_val(pud) & PUD_TYPE_MASK) != \
|
||||
PUD_TYPE_TABLE)
|
||||
#define pud_present(pud) pte_present(pud_pte(pud))
|
||||
#define pud_leaf(pud) (pud_present(pud) && !pud_table(pud))
|
||||
#define pud_valid(pud) pte_valid(pud_pte(pud))
|
||||
|
|
|
@ -904,6 +904,7 @@ static u8 spectre_bhb_loop_affected(void)
|
|||
MIDR_ALL_VERSIONS(MIDR_CORTEX_A77),
|
||||
MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1),
|
||||
MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_4XX_GOLD),
|
||||
MIDR_ALL_VERSIONS(MIDR_HISI_HIP09),
|
||||
{},
|
||||
};
|
||||
static const struct midr_range spectre_bhb_k11_list[] = {
|
||||
|
|
|
@ -87,4 +87,20 @@ struct dyn_arch_ftrace {
|
|||
#endif /* CONFIG_DYNAMIC_FTRACE */
|
||||
#endif /* __ASSEMBLY__ */
|
||||
#endif /* CONFIG_FUNCTION_TRACER */
|
||||
|
||||
#ifdef CONFIG_FTRACE_SYSCALLS
|
||||
#ifndef __ASSEMBLY__
|
||||
/*
|
||||
* Some syscall entry functions on mips start with "__sys_" (fork and clone,
|
||||
* for instance). We should also match the sys_ variant with those.
|
||||
*/
|
||||
#define ARCH_HAS_SYSCALL_MATCH_SYM_NAME
|
||||
static inline bool arch_syscall_match_sym_name(const char *sym,
|
||||
const char *name)
|
||||
{
|
||||
return !strcmp(sym, name) ||
|
||||
(!strncmp(sym, "__sys_", 6) && !strcmp(sym + 6, name + 4));
|
||||
}
|
||||
#endif /* __ASSEMBLY__ */
|
||||
#endif /* CONFIG_FTRACE_SYSCALLS */
|
||||
#endif /* _ASM_MIPS_FTRACE_H */
|
||||
|
|
|
@ -56,10 +56,7 @@ static DEFINE_PER_CPU_ALIGNED(u32*, ready_count);
|
|||
/* Indicates online CPUs coupled with the current CPU */
|
||||
static DEFINE_PER_CPU_ALIGNED(cpumask_t, online_coupled);
|
||||
|
||||
/*
|
||||
* Used to synchronize entry to deep idle states. Actually per-core rather
|
||||
* than per-CPU.
|
||||
*/
|
||||
/* Used to synchronize entry to deep idle states */
|
||||
static DEFINE_PER_CPU_ALIGNED(atomic_t, pm_barrier);
|
||||
|
||||
/* Saved CPU state across the CPS_PM_POWER_GATED state */
|
||||
|
@ -118,9 +115,10 @@ int cps_pm_enter_state(enum cps_pm_state state)
|
|||
cps_nc_entry_fn entry;
|
||||
struct core_boot_config *core_cfg;
|
||||
struct vpe_boot_config *vpe_cfg;
|
||||
atomic_t *barrier;
|
||||
|
||||
/* Check that there is an entry function for this state */
|
||||
entry = per_cpu(nc_asm_enter, core)[state];
|
||||
entry = per_cpu(nc_asm_enter, cpu)[state];
|
||||
if (!entry)
|
||||
return -EINVAL;
|
||||
|
||||
|
@ -156,7 +154,7 @@ int cps_pm_enter_state(enum cps_pm_state state)
|
|||
smp_mb__after_atomic();
|
||||
|
||||
/* Create a non-coherent mapping of the core ready_count */
|
||||
core_ready_count = per_cpu(ready_count, core);
|
||||
core_ready_count = per_cpu(ready_count, cpu);
|
||||
nc_addr = kmap_noncoherent(virt_to_page(core_ready_count),
|
||||
(unsigned long)core_ready_count);
|
||||
nc_addr += ((unsigned long)core_ready_count & ~PAGE_MASK);
|
||||
|
@ -164,7 +162,8 @@ int cps_pm_enter_state(enum cps_pm_state state)
|
|||
|
||||
/* Ensure ready_count is zero-initialised before the assembly runs */
|
||||
WRITE_ONCE(*nc_core_ready_count, 0);
|
||||
coupled_barrier(&per_cpu(pm_barrier, core), online);
|
||||
barrier = &per_cpu(pm_barrier, cpumask_first(&cpu_sibling_map[cpu]));
|
||||
coupled_barrier(barrier, online);
|
||||
|
||||
/* Run the generated entry code */
|
||||
left = entry(online, nc_core_ready_count);
|
||||
|
@ -635,12 +634,14 @@ out_err:
|
|||
|
||||
static int cps_pm_online_cpu(unsigned int cpu)
|
||||
{
|
||||
enum cps_pm_state state;
|
||||
unsigned core = cpu_core(&cpu_data[cpu]);
|
||||
unsigned int sibling, core;
|
||||
void *entry_fn, *core_rc;
|
||||
enum cps_pm_state state;
|
||||
|
||||
core = cpu_core(&cpu_data[cpu]);
|
||||
|
||||
for (state = CPS_PM_NC_WAIT; state < CPS_PM_STATE_COUNT; state++) {
|
||||
if (per_cpu(nc_asm_enter, core)[state])
|
||||
if (per_cpu(nc_asm_enter, cpu)[state])
|
||||
continue;
|
||||
if (!test_bit(state, state_support))
|
||||
continue;
|
||||
|
@ -652,16 +653,19 @@ static int cps_pm_online_cpu(unsigned int cpu)
|
|||
clear_bit(state, state_support);
|
||||
}
|
||||
|
||||
per_cpu(nc_asm_enter, core)[state] = entry_fn;
|
||||
for_each_cpu(sibling, &cpu_sibling_map[cpu])
|
||||
per_cpu(nc_asm_enter, sibling)[state] = entry_fn;
|
||||
}
|
||||
|
||||
if (!per_cpu(ready_count, core)) {
|
||||
if (!per_cpu(ready_count, cpu)) {
|
||||
core_rc = kmalloc(sizeof(u32), GFP_KERNEL);
|
||||
if (!core_rc) {
|
||||
pr_err("Failed allocate core %u ready_count\n", core);
|
||||
return -ENOMEM;
|
||||
}
|
||||
per_cpu(ready_count, core) = core_rc;
|
||||
|
||||
for_each_cpu(sibling, &cpu_sibling_map[cpu])
|
||||
per_cpu(ready_count, sibling) = core_rc;
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
|
|
@ -35,6 +35,7 @@ extern cpumask_var_t node_to_cpumask_map[];
|
|||
#ifdef CONFIG_MEMORY_HOTPLUG
|
||||
extern unsigned long max_pfn;
|
||||
u64 memory_hotplug_max(void);
|
||||
u64 hot_add_drconf_memory_max(void);
|
||||
#else
|
||||
#define memory_hotplug_max() memblock_end_of_DRAM()
|
||||
#endif
|
||||
|
|
|
@ -2974,11 +2974,11 @@ static void __init fixup_device_tree_pmac(void)
|
|||
char type[8];
|
||||
phandle node;
|
||||
|
||||
// Some pmacs are missing #size-cells on escc nodes
|
||||
// Some pmacs are missing #size-cells on escc or i2s nodes
|
||||
for (node = 0; prom_next_node(&node); ) {
|
||||
type[0] = '\0';
|
||||
prom_getprop(node, "device_type", type, sizeof(type));
|
||||
if (prom_strcmp(type, "escc"))
|
||||
if (prom_strcmp(type, "escc") && prom_strcmp(type, "i2s"))
|
||||
continue;
|
||||
|
||||
if (prom_getproplen(node, "#size-cells") != PROM_ERROR)
|
||||
|
|
|
@ -912,7 +912,7 @@ int __meminit radix__vmemmap_create_mapping(unsigned long start,
|
|||
return 0;
|
||||
}
|
||||
|
||||
|
||||
#ifdef CONFIG_ARCH_WANT_OPTIMIZE_DAX_VMEMMAP
|
||||
bool vmemmap_can_optimize(struct vmem_altmap *altmap, struct dev_pagemap *pgmap)
|
||||
{
|
||||
if (radix_enabled())
|
||||
|
@ -920,6 +920,7 @@ bool vmemmap_can_optimize(struct vmem_altmap *altmap, struct dev_pagemap *pgmap)
|
|||
|
||||
return false;
|
||||
}
|
||||
#endif
|
||||
|
||||
int __meminit vmemmap_check_pmd(pmd_t *pmdp, int node,
|
||||
unsigned long addr, unsigned long next)
|
||||
|
|
|
@ -1342,7 +1342,7 @@ int hot_add_scn_to_nid(unsigned long scn_addr)
|
|||
return nid;
|
||||
}
|
||||
|
||||
static u64 hot_add_drconf_memory_max(void)
|
||||
u64 hot_add_drconf_memory_max(void)
|
||||
{
|
||||
struct device_node *memory = NULL;
|
||||
struct device_node *dn = NULL;
|
||||
|
|
|
@ -2229,6 +2229,10 @@ static struct pmu power_pmu = {
|
|||
#define PERF_SAMPLE_ADDR_TYPE (PERF_SAMPLE_ADDR | \
|
||||
PERF_SAMPLE_PHYS_ADDR | \
|
||||
PERF_SAMPLE_DATA_PAGE_SIZE)
|
||||
|
||||
#define SIER_TYPE_SHIFT 15
|
||||
#define SIER_TYPE_MASK (0x7ull << SIER_TYPE_SHIFT)
|
||||
|
||||
/*
|
||||
* A counter has overflowed; update its count and record
|
||||
* things if requested. Note that interrupts are hard-disabled
|
||||
|
@ -2297,6 +2301,22 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
|
|||
is_kernel_addr(mfspr(SPRN_SIAR)))
|
||||
record = 0;
|
||||
|
||||
/*
|
||||
* SIER[46-48] presents instruction type of the sampled instruction.
|
||||
* In ISA v3.0 and before values "0" and "7" are considered reserved.
|
||||
* In ISA v3.1, value "7" has been used to indicate "larx/stcx".
|
||||
* Drop the sample if "type" has reserved values for this field with a
|
||||
* ISA version check.
|
||||
*/
|
||||
if (event->attr.sample_type & PERF_SAMPLE_DATA_SRC &&
|
||||
ppmu->get_mem_data_src) {
|
||||
val = (regs->dar & SIER_TYPE_MASK) >> SIER_TYPE_SHIFT;
|
||||
if (val == 0 || (val == 7 && !cpu_has_feature(CPU_FTR_ARCH_31))) {
|
||||
record = 0;
|
||||
atomic64_inc(&event->lost_samples);
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Finally record data if requested.
|
||||
*/
|
||||
|
|
|
@ -321,8 +321,10 @@ void isa207_get_mem_data_src(union perf_mem_data_src *dsrc, u32 flags,
|
|||
|
||||
sier = mfspr(SPRN_SIER);
|
||||
val = (sier & ISA207_SIER_TYPE_MASK) >> ISA207_SIER_TYPE_SHIFT;
|
||||
if (val != 1 && val != 2 && !(val == 7 && cpu_has_feature(CPU_FTR_ARCH_31)))
|
||||
if (val != 1 && val != 2 && !(val == 7 && cpu_has_feature(CPU_FTR_ARCH_31))) {
|
||||
dsrc->val = 0;
|
||||
return;
|
||||
}
|
||||
|
||||
idx = (sier & ISA207_SIER_LDST_MASK) >> ISA207_SIER_LDST_SHIFT;
|
||||
sub_idx = (sier & ISA207_SIER_DATA_SRC_MASK) >> ISA207_SIER_DATA_SRC_SHIFT;
|
||||
|
|
|
@ -1183,17 +1183,13 @@ static LIST_HEAD(failed_ddw_pdn_list);
|
|||
|
||||
static phys_addr_t ddw_memory_hotplug_max(void)
|
||||
{
|
||||
resource_size_t max_addr = memory_hotplug_max();
|
||||
struct device_node *memory;
|
||||
resource_size_t max_addr;
|
||||
|
||||
for_each_node_by_type(memory, "memory") {
|
||||
struct resource res;
|
||||
|
||||
if (of_address_to_resource(memory, 0, &res))
|
||||
continue;
|
||||
|
||||
max_addr = max_t(resource_size_t, max_addr, res.end + 1);
|
||||
}
|
||||
#if defined(CONFIG_NUMA) && defined(CONFIG_MEMORY_HOTPLUG)
|
||||
max_addr = hot_add_drconf_memory_max();
|
||||
#else
|
||||
max_addr = memblock_end_of_DRAM();
|
||||
#endif
|
||||
|
||||
return max_addr;
|
||||
}
|
||||
|
@ -1471,7 +1467,7 @@ static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn)
|
|||
window->direct = true;
|
||||
|
||||
/* DDW maps the whole partition, so enable direct DMA mapping */
|
||||
ret = walk_system_ram_range(0, memblock_end_of_DRAM() >> PAGE_SHIFT,
|
||||
ret = walk_system_ram_range(0, ddw_memory_hotplug_max() >> PAGE_SHIFT,
|
||||
win64->value, tce_setrange_multi_pSeriesLP_walk);
|
||||
if (ret) {
|
||||
dev_info(&dev->dev, "failed to map DMA window for %pOF: %d\n",
|
||||
|
@ -1658,11 +1654,17 @@ static int iommu_mem_notifier(struct notifier_block *nb, unsigned long action,
|
|||
struct memory_notify *arg = data;
|
||||
int ret = 0;
|
||||
|
||||
/* This notifier can get called when onlining persistent memory as well.
|
||||
* TCEs are not pre-mapped for persistent memory. Persistent memory will
|
||||
* always be above ddw_memory_hotplug_max()
|
||||
*/
|
||||
|
||||
switch (action) {
|
||||
case MEM_GOING_ONLINE:
|
||||
spin_lock(&dma_win_list_lock);
|
||||
list_for_each_entry(window, &dma_win_list, list) {
|
||||
if (window->direct) {
|
||||
if (window->direct && (arg->start_pfn << PAGE_SHIFT) <
|
||||
ddw_memory_hotplug_max()) {
|
||||
ret |= tce_setrange_multi_pSeriesLP(arg->start_pfn,
|
||||
arg->nr_pages, window->prop);
|
||||
}
|
||||
|
@ -1674,7 +1676,8 @@ static int iommu_mem_notifier(struct notifier_block *nb, unsigned long action,
|
|||
case MEM_OFFLINE:
|
||||
spin_lock(&dma_win_list_lock);
|
||||
list_for_each_entry(window, &dma_win_list, list) {
|
||||
if (window->direct) {
|
||||
if (window->direct && (arg->start_pfn << PAGE_SHIFT) <
|
||||
ddw_memory_hotplug_max()) {
|
||||
ret |= tce_clearrange_multi_pSeriesLP(arg->start_pfn,
|
||||
arg->nr_pages, window->prop);
|
||||
}
|
||||
|
|
|
@ -26,12 +26,9 @@
|
|||
* When not using MMU this corresponds to the first free page in
|
||||
* physical memory (aligned on a page boundary).
|
||||
*/
|
||||
#ifdef CONFIG_64BIT
|
||||
#ifdef CONFIG_MMU
|
||||
#ifdef CONFIG_64BIT
|
||||
#define PAGE_OFFSET kernel_map.page_offset
|
||||
#else
|
||||
#define PAGE_OFFSET _AC(CONFIG_PAGE_OFFSET, UL)
|
||||
#endif
|
||||
/*
|
||||
* By default, CONFIG_PAGE_OFFSET value corresponds to SV57 address space so
|
||||
* define the PAGE_OFFSET value for SV48 and SV39.
|
||||
|
@ -41,6 +38,9 @@
|
|||
#else
|
||||
#define PAGE_OFFSET _AC(CONFIG_PAGE_OFFSET, UL)
|
||||
#endif /* CONFIG_64BIT */
|
||||
#else
|
||||
#define PAGE_OFFSET ((unsigned long)phys_ram_base)
|
||||
#endif /* CONFIG_MMU */
|
||||
|
||||
#ifndef __ASSEMBLY__
|
||||
|
||||
|
@ -97,11 +97,7 @@ typedef struct page *pgtable_t;
|
|||
#define MIN_MEMBLOCK_ADDR 0
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_MMU
|
||||
#define ARCH_PFN_OFFSET (PFN_DOWN((unsigned long)phys_ram_base))
|
||||
#else
|
||||
#define ARCH_PFN_OFFSET (PAGE_OFFSET >> PAGE_SHIFT)
|
||||
#endif /* CONFIG_MMU */
|
||||
|
||||
struct kernel_mapping {
|
||||
unsigned long page_offset;
|
||||
|
|
|
@ -12,7 +12,7 @@
|
|||
#include <asm/pgtable-bits.h>
|
||||
|
||||
#ifndef CONFIG_MMU
|
||||
#define KERNEL_LINK_ADDR PAGE_OFFSET
|
||||
#define KERNEL_LINK_ADDR _AC(CONFIG_PAGE_OFFSET, UL)
|
||||
#define KERN_VIRT_SIZE (UL(-1))
|
||||
#else
|
||||
|
||||
|
|
|
@ -208,6 +208,8 @@ static int hypfs_create_cpu_files(struct dentry *cpus_dir, void *cpu_info)
|
|||
snprintf(buffer, TMP_SIZE, "%d", cpu_info__cpu_addr(diag204_get_info_type(),
|
||||
cpu_info));
|
||||
cpu_dir = hypfs_mkdir(cpus_dir, buffer);
|
||||
if (IS_ERR(cpu_dir))
|
||||
return PTR_ERR(cpu_dir);
|
||||
rc = hypfs_create_u64(cpu_dir, "mgmtime",
|
||||
cpu_info__acc_time(diag204_get_info_type(), cpu_info) -
|
||||
cpu_info__lp_time(diag204_get_info_type(), cpu_info));
|
||||
|
|
|
@ -151,5 +151,6 @@ MRPROPER_FILES += $(HOST_DIR)/include/generated
|
|||
archclean:
|
||||
@find . \( -name '*.bb' -o -name '*.bbg' -o -name '*.da' \
|
||||
-o -name '*.gcov' \) -type f -print | xargs rm -f
|
||||
$(Q)$(MAKE) -f $(srctree)/Makefile ARCH=$(HEADER_ARCH) clean
|
||||
|
||||
export HEADER_ARCH SUBARCH USER_CFLAGS CFLAGS_NO_HARDENING DEV_NULL_PATH
|
||||
|
|
|
@ -68,6 +68,7 @@ void __init mem_init(void)
|
|||
map_memory(brk_end, __pa(brk_end), uml_reserved - brk_end, 1, 1, 0);
|
||||
memblock_free((void *)brk_end, uml_reserved - brk_end);
|
||||
uml_reserved = brk_end;
|
||||
min_low_pfn = PFN_UP(__pa(uml_reserved));
|
||||
|
||||
/* this will put all low memory onto the freelists */
|
||||
memblock_free_all();
|
||||
|
|
|
@ -43,7 +43,7 @@ endif
|
|||
|
||||
# How to compile the 16-bit code. Note we always compile for -march=i386;
|
||||
# that way we can complain to the user if the CPU is insufficient.
|
||||
REALMODE_CFLAGS := -m16 -g -Os -DDISABLE_BRANCH_PROFILING -D__DISABLE_EXPORTS \
|
||||
REALMODE_CFLAGS := -std=gnu11 -m16 -g -Os -DDISABLE_BRANCH_PROFILING -D__DISABLE_EXPORTS \
|
||||
-Wall -Wstrict-prototypes -march=i386 -mregparm=3 \
|
||||
-fno-strict-aliasing -fomit-frame-pointer -fno-pic \
|
||||
-mno-mmx -mno-sse $(call cc-option,-fcf-protection=none)
|
||||
|
|
|
@ -22,6 +22,7 @@
|
|||
# This script requires:
|
||||
# bash
|
||||
# syslinux
|
||||
# genisoimage
|
||||
# mtools (for fdimage* and hdimage)
|
||||
# edk2/OVMF (for hdimage)
|
||||
#
|
||||
|
@ -251,7 +252,9 @@ geniso() {
|
|||
cp "$isolinux" "$ldlinux" "$tmp_dir"
|
||||
cp "$FBZIMAGE" "$tmp_dir"/linux
|
||||
echo default linux "$KCMDLINE" > "$tmp_dir"/isolinux.cfg
|
||||
cp "${FDINITRDS[@]}" "$tmp_dir"/
|
||||
if [ ${#FDINITRDS[@]} -gt 0 ]; then
|
||||
cp "${FDINITRDS[@]}" "$tmp_dir"/
|
||||
fi
|
||||
genisoimage -J -r -appid 'LINUX_BOOT' -input-charset=utf-8 \
|
||||
-quiet -o "$FIMAGE" -b isolinux.bin \
|
||||
-c boot.cat -no-emul-boot -boot-load-size 4 \
|
||||
|
|
|
@ -59,7 +59,7 @@ EXPORT_SYMBOL_GPL(mds_verw_sel);
|
|||
* entirely in the C code, and use an alias emitted by the linker script
|
||||
* instead.
|
||||
*/
|
||||
#ifdef CONFIG_STACKPROTECTOR
|
||||
#if defined(CONFIG_STACKPROTECTOR) && defined(CONFIG_SMP)
|
||||
EXPORT_SYMBOL(__ref_stack_chk_guard);
|
||||
#endif
|
||||
#endif
|
||||
|
|
|
@ -272,7 +272,7 @@ static int perf_ibs_init(struct perf_event *event)
|
|||
{
|
||||
struct hw_perf_event *hwc = &event->hw;
|
||||
struct perf_ibs *perf_ibs;
|
||||
u64 max_cnt, config;
|
||||
u64 config;
|
||||
int ret;
|
||||
|
||||
perf_ibs = get_ibs_pmu(event->attr.type);
|
||||
|
@ -306,10 +306,19 @@ static int perf_ibs_init(struct perf_event *event)
|
|||
if (!hwc->sample_period)
|
||||
hwc->sample_period = 0x10;
|
||||
} else {
|
||||
max_cnt = config & perf_ibs->cnt_mask;
|
||||
u64 period = 0;
|
||||
|
||||
if (perf_ibs == &perf_ibs_op) {
|
||||
period = (config & IBS_OP_MAX_CNT) << 4;
|
||||
if (ibs_caps & IBS_CAPS_OPCNTEXT)
|
||||
period |= config & IBS_OP_MAX_CNT_EXT_MASK;
|
||||
} else {
|
||||
period = (config & IBS_FETCH_MAX_CNT) << 4;
|
||||
}
|
||||
|
||||
config &= ~perf_ibs->cnt_mask;
|
||||
event->attr.sample_period = max_cnt << 4;
|
||||
hwc->sample_period = event->attr.sample_period;
|
||||
event->attr.sample_period = period;
|
||||
hwc->sample_period = period;
|
||||
}
|
||||
|
||||
if (!hwc->sample_period)
|
||||
|
@ -1219,7 +1228,8 @@ static __init int perf_ibs_op_init(void)
|
|||
if (ibs_caps & IBS_CAPS_OPCNTEXT) {
|
||||
perf_ibs_op.max_period |= IBS_OP_MAX_CNT_EXT_MASK;
|
||||
perf_ibs_op.config_mask |= IBS_OP_MAX_CNT_EXT_MASK;
|
||||
perf_ibs_op.cnt_mask |= IBS_OP_MAX_CNT_EXT_MASK;
|
||||
perf_ibs_op.cnt_mask |= (IBS_OP_MAX_CNT_EXT_MASK |
|
||||
IBS_OP_CUR_CNT_EXT_MASK);
|
||||
}
|
||||
|
||||
if (ibs_caps & IBS_CAPS_ZEN4)
|
||||
|
|
|
@ -22,8 +22,9 @@
|
|||
#define SECOND_BYTE_OPCODE_UD2 0x0b
|
||||
|
||||
#define BUG_NONE 0xffff
|
||||
#define BUG_UD1 0xfffe
|
||||
#define BUG_UD2 0xfffd
|
||||
#define BUG_UD2 0xfffe
|
||||
#define BUG_UD1 0xfffd
|
||||
#define BUG_UD1_UBSAN 0xfffc
|
||||
|
||||
#ifdef CONFIG_GENERIC_BUG
|
||||
|
||||
|
|
|
@ -41,7 +41,7 @@
|
|||
_ASM_PTR fname "\n\t" \
|
||||
".popsection\n\t"
|
||||
|
||||
static inline __attribute_const__ u32 gen_endbr(void)
|
||||
static __always_inline __attribute_const__ u32 gen_endbr(void)
|
||||
{
|
||||
u32 endbr;
|
||||
|
||||
|
@ -56,7 +56,7 @@ static inline __attribute_const__ u32 gen_endbr(void)
|
|||
return endbr;
|
||||
}
|
||||
|
||||
static inline __attribute_const__ u32 gen_endbr_poison(void)
|
||||
static __always_inline __attribute_const__ u32 gen_endbr_poison(void)
|
||||
{
|
||||
/*
|
||||
* 4 byte NOP that isn't NOP4 (in fact it is OSP NOP3), such that it
|
||||
|
|
|
@ -59,6 +59,8 @@ int __register_nmi_handler(unsigned int, struct nmiaction *);
|
|||
|
||||
void unregister_nmi_handler(unsigned int, const char *);
|
||||
|
||||
void set_emergency_nmi_handler(unsigned int type, nmi_handler_t handler);
|
||||
|
||||
void stop_nmi(void);
|
||||
void restart_nmi(void);
|
||||
void local_touch_nmi(void);
|
||||
|
|
|
@ -501,6 +501,7 @@ struct pebs_xmm {
|
|||
*/
|
||||
#define IBS_OP_CUR_CNT (0xFFF80ULL<<32)
|
||||
#define IBS_OP_CUR_CNT_RAND (0x0007FULL<<32)
|
||||
#define IBS_OP_CUR_CNT_EXT_MASK (0x7FULL<<52)
|
||||
#define IBS_OP_CNT_CTL (1ULL<<19)
|
||||
#define IBS_OP_VAL (1ULL<<18)
|
||||
#define IBS_OP_ENABLE (1ULL<<17)
|
||||
|
|
|
@ -1442,9 +1442,13 @@ static __ro_after_init enum spectre_v2_mitigation_cmd spectre_v2_cmd;
|
|||
static enum spectre_v2_user_cmd __init
|
||||
spectre_v2_parse_user_cmdline(void)
|
||||
{
|
||||
enum spectre_v2_user_cmd mode;
|
||||
char arg[20];
|
||||
int ret, i;
|
||||
|
||||
mode = IS_ENABLED(CONFIG_MITIGATION_SPECTRE_V2) ?
|
||||
SPECTRE_V2_USER_CMD_AUTO : SPECTRE_V2_USER_CMD_NONE;
|
||||
|
||||
switch (spectre_v2_cmd) {
|
||||
case SPECTRE_V2_CMD_NONE:
|
||||
return SPECTRE_V2_USER_CMD_NONE;
|
||||
|
@ -1457,7 +1461,7 @@ spectre_v2_parse_user_cmdline(void)
|
|||
ret = cmdline_find_option(boot_command_line, "spectre_v2_user",
|
||||
arg, sizeof(arg));
|
||||
if (ret < 0)
|
||||
return SPECTRE_V2_USER_CMD_AUTO;
|
||||
return mode;
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(v2_user_options); i++) {
|
||||
if (match_option(arg, ret, v2_user_options[i].option)) {
|
||||
|
@ -1467,8 +1471,8 @@ spectre_v2_parse_user_cmdline(void)
|
|||
}
|
||||
}
|
||||
|
||||
pr_err("Unknown user space protection option (%s). Switching to AUTO select\n", arg);
|
||||
return SPECTRE_V2_USER_CMD_AUTO;
|
||||
pr_err("Unknown user space protection option (%s). Switching to default\n", arg);
|
||||
return mode;
|
||||
}
|
||||
|
||||
static inline bool spectre_v2_in_ibrs_mode(enum spectre_v2_mitigation mode)
|
||||
|
|
|
@ -39,8 +39,12 @@
|
|||
#define CREATE_TRACE_POINTS
|
||||
#include <trace/events/nmi.h>
|
||||
|
||||
/*
|
||||
* An emergency handler can be set in any context including NMI
|
||||
*/
|
||||
struct nmi_desc {
|
||||
raw_spinlock_t lock;
|
||||
nmi_handler_t emerg_handler;
|
||||
struct list_head head;
|
||||
};
|
||||
|
||||
|
@ -131,9 +135,22 @@ static void nmi_check_duration(struct nmiaction *action, u64 duration)
|
|||
static int nmi_handle(unsigned int type, struct pt_regs *regs)
|
||||
{
|
||||
struct nmi_desc *desc = nmi_to_desc(type);
|
||||
nmi_handler_t ehandler;
|
||||
struct nmiaction *a;
|
||||
int handled=0;
|
||||
|
||||
/*
|
||||
* Call the emergency handler, if set
|
||||
*
|
||||
* In the case of crash_nmi_callback() emergency handler, it will
|
||||
* return in the case of the crashing CPU to enable it to complete
|
||||
* other necessary crashing actions ASAP. Other handlers in the
|
||||
* linked list won't need to be run.
|
||||
*/
|
||||
ehandler = desc->emerg_handler;
|
||||
if (ehandler)
|
||||
return ehandler(type, regs);
|
||||
|
||||
rcu_read_lock();
|
||||
|
||||
/*
|
||||
|
@ -223,6 +240,31 @@ void unregister_nmi_handler(unsigned int type, const char *name)
|
|||
}
|
||||
EXPORT_SYMBOL_GPL(unregister_nmi_handler);
|
||||
|
||||
/**
|
||||
* set_emergency_nmi_handler - Set emergency handler
|
||||
* @type: NMI type
|
||||
* @handler: the emergency handler to be stored
|
||||
*
|
||||
* Set an emergency NMI handler which, if set, will preempt all the other
|
||||
* handlers in the linked list. If a NULL handler is passed in, it will clear
|
||||
* it. It is expected that concurrent calls to this function will not happen
|
||||
* or the system is screwed beyond repair.
|
||||
*/
|
||||
void set_emergency_nmi_handler(unsigned int type, nmi_handler_t handler)
|
||||
{
|
||||
struct nmi_desc *desc = nmi_to_desc(type);
|
||||
|
||||
if (WARN_ON_ONCE(desc->emerg_handler == handler))
|
||||
return;
|
||||
desc->emerg_handler = handler;
|
||||
|
||||
/*
|
||||
* Ensure the emergency handler is visible to other CPUs before
|
||||
* function return
|
||||
*/
|
||||
smp_wmb();
|
||||
}
|
||||
|
||||
static void
|
||||
pci_serr_error(unsigned char reason, struct pt_regs *regs)
|
||||
{
|
||||
|
|
|
@ -908,15 +908,11 @@ void nmi_shootdown_cpus(nmi_shootdown_cb callback)
|
|||
shootdown_callback = callback;
|
||||
|
||||
atomic_set(&waiting_for_crash_ipi, num_online_cpus() - 1);
|
||||
/* Would it be better to replace the trap vector here? */
|
||||
if (register_nmi_handler(NMI_LOCAL, crash_nmi_callback,
|
||||
NMI_FLAG_FIRST, "crash"))
|
||||
return; /* Return what? */
|
||||
|
||||
/*
|
||||
* Ensure the new callback function is set before sending
|
||||
* out the NMI
|
||||
* Set emergency handler to preempt other handlers.
|
||||
*/
|
||||
wmb();
|
||||
set_emergency_nmi_handler(NMI_LOCAL, crash_nmi_callback);
|
||||
|
||||
apic_send_IPI_allbutself(NMI_VECTOR);
|
||||
|
||||
|
|
|
@ -92,10 +92,17 @@ __always_inline int is_valid_bugaddr(unsigned long addr)
|
|||
|
||||
/*
|
||||
* Check for UD1 or UD2, accounting for Address Size Override Prefixes.
|
||||
* If it's a UD1, get the ModRM byte to pass along to UBSan.
|
||||
* If it's a UD1, further decode to determine its use:
|
||||
*
|
||||
* UBSan{0}: 67 0f b9 00 ud1 (%eax),%eax
|
||||
* UBSan{10}: 67 0f b9 40 10 ud1 0x10(%eax),%eax
|
||||
* static_call: 0f b9 cc ud1 %esp,%ecx
|
||||
*
|
||||
* Notably UBSAN uses EAX, static_call uses ECX.
|
||||
*/
|
||||
__always_inline int decode_bug(unsigned long addr, u32 *imm)
|
||||
__always_inline int decode_bug(unsigned long addr, s32 *imm, int *len)
|
||||
{
|
||||
unsigned long start = addr;
|
||||
u8 v;
|
||||
|
||||
if (addr < TASK_SIZE_MAX)
|
||||
|
@ -108,24 +115,42 @@ __always_inline int decode_bug(unsigned long addr, u32 *imm)
|
|||
return BUG_NONE;
|
||||
|
||||
v = *(u8 *)(addr++);
|
||||
if (v == SECOND_BYTE_OPCODE_UD2)
|
||||
if (v == SECOND_BYTE_OPCODE_UD2) {
|
||||
*len = addr - start;
|
||||
return BUG_UD2;
|
||||
}
|
||||
|
||||
if (!IS_ENABLED(CONFIG_UBSAN_TRAP) || v != SECOND_BYTE_OPCODE_UD1)
|
||||
if (v != SECOND_BYTE_OPCODE_UD1)
|
||||
return BUG_NONE;
|
||||
|
||||
/* Retrieve the immediate (type value) for the UBSAN UD1 */
|
||||
v = *(u8 *)(addr++);
|
||||
if (X86_MODRM_RM(v) == 4)
|
||||
addr++;
|
||||
|
||||
*imm = 0;
|
||||
if (X86_MODRM_MOD(v) == 1)
|
||||
*imm = *(u8 *)addr;
|
||||
else if (X86_MODRM_MOD(v) == 2)
|
||||
*imm = *(u32 *)addr;
|
||||
else
|
||||
WARN_ONCE(1, "Unexpected MODRM_MOD: %u\n", X86_MODRM_MOD(v));
|
||||
v = *(u8 *)(addr++); /* ModRM */
|
||||
|
||||
if (X86_MODRM_MOD(v) != 3 && X86_MODRM_RM(v) == 4)
|
||||
addr++; /* SIB */
|
||||
|
||||
/* Decode immediate, if present */
|
||||
switch (X86_MODRM_MOD(v)) {
|
||||
case 0: if (X86_MODRM_RM(v) == 5)
|
||||
addr += 4; /* RIP + disp32 */
|
||||
break;
|
||||
|
||||
case 1: *imm = *(s8 *)addr;
|
||||
addr += 1;
|
||||
break;
|
||||
|
||||
case 2: *imm = *(s32 *)addr;
|
||||
addr += 4;
|
||||
break;
|
||||
|
||||
case 3: break;
|
||||
}
|
||||
|
||||
/* record instruction length */
|
||||
*len = addr - start;
|
||||
|
||||
if (X86_MODRM_REG(v) == 0) /* EAX */
|
||||
return BUG_UD1_UBSAN;
|
||||
|
||||
return BUG_UD1;
|
||||
}
|
||||
|
@ -256,10 +281,10 @@ static inline void handle_invalid_op(struct pt_regs *regs)
|
|||
static noinstr bool handle_bug(struct pt_regs *regs)
|
||||
{
|
||||
bool handled = false;
|
||||
int ud_type;
|
||||
u32 imm;
|
||||
int ud_type, ud_len;
|
||||
s32 ud_imm;
|
||||
|
||||
ud_type = decode_bug(regs->ip, &imm);
|
||||
ud_type = decode_bug(regs->ip, &ud_imm, &ud_len);
|
||||
if (ud_type == BUG_NONE)
|
||||
return handled;
|
||||
|
||||
|
@ -279,15 +304,28 @@ static noinstr bool handle_bug(struct pt_regs *regs)
|
|||
*/
|
||||
if (regs->flags & X86_EFLAGS_IF)
|
||||
raw_local_irq_enable();
|
||||
if (ud_type == BUG_UD2) {
|
||||
|
||||
switch (ud_type) {
|
||||
case BUG_UD2:
|
||||
if (report_bug(regs->ip, regs) == BUG_TRAP_TYPE_WARN ||
|
||||
handle_cfi_failure(regs) == BUG_TRAP_TYPE_WARN) {
|
||||
regs->ip += LEN_UD2;
|
||||
regs->ip += ud_len;
|
||||
handled = true;
|
||||
}
|
||||
} else if (IS_ENABLED(CONFIG_UBSAN_TRAP)) {
|
||||
pr_crit("%s at %pS\n", report_ubsan_failure(regs, imm), (void *)regs->ip);
|
||||
break;
|
||||
|
||||
case BUG_UD1_UBSAN:
|
||||
if (IS_ENABLED(CONFIG_UBSAN_TRAP)) {
|
||||
pr_crit("%s at %pS\n",
|
||||
report_ubsan_failure(regs, ud_imm),
|
||||
(void *)regs->ip);
|
||||
}
|
||||
break;
|
||||
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
||||
if (regs->flags & X86_EFLAGS_IF)
|
||||
raw_local_irq_disable();
|
||||
instrumentation_end();
|
||||
|
|
|
@ -644,8 +644,13 @@ static void __init memory_map_top_down(unsigned long map_start,
|
|||
*/
|
||||
addr = memblock_phys_alloc_range(PMD_SIZE, PMD_SIZE, map_start,
|
||||
map_end);
|
||||
memblock_phys_free(addr, PMD_SIZE);
|
||||
real_end = addr + PMD_SIZE;
|
||||
if (!addr) {
|
||||
pr_warn("Failed to release memory for alloc_low_pages()");
|
||||
real_end = max(map_start, ALIGN_DOWN(map_end, PMD_SIZE));
|
||||
} else {
|
||||
memblock_phys_free(addr, PMD_SIZE);
|
||||
real_end = addr + PMD_SIZE;
|
||||
}
|
||||
|
||||
/* step_size need to be small so pgt_buf from BRK could cover it */
|
||||
step_size = PMD_SIZE;
|
||||
|
|
|
@ -959,9 +959,18 @@ int add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
|
|||
ret = __add_pages(nid, start_pfn, nr_pages, params);
|
||||
WARN_ON_ONCE(ret);
|
||||
|
||||
/* update max_pfn, max_low_pfn and high_memory */
|
||||
update_end_of_memory_vars(start_pfn << PAGE_SHIFT,
|
||||
nr_pages << PAGE_SHIFT);
|
||||
/*
|
||||
* Special case: add_pages() is called by memremap_pages() for adding device
|
||||
* private pages. Do not bump up max_pfn in the device private path,
|
||||
* because max_pfn changes affect dma_addressing_limited().
|
||||
*
|
||||
* dma_addressing_limited() returning true when max_pfn is the device's
|
||||
* addressable memory can force device drivers to use bounce buffers
|
||||
* and impact their performance negatively:
|
||||
*/
|
||||
if (!params->pgmap)
|
||||
/* update max_pfn, max_low_pfn and high_memory */
|
||||
update_end_of_memory_vars(start_pfn << PAGE_SHIFT, nr_pages << PAGE_SHIFT);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
|
|
@ -109,8 +109,14 @@ void __init kernel_randomize_memory(void)
|
|||
memory_tb = DIV_ROUND_UP(max_pfn << PAGE_SHIFT, 1UL << TB_SHIFT) +
|
||||
CONFIG_RANDOMIZE_MEMORY_PHYSICAL_PADDING;
|
||||
|
||||
/* Adapt physical memory region size based on available memory */
|
||||
if (memory_tb < kaslr_regions[0].size_tb)
|
||||
/*
|
||||
* Adapt physical memory region size based on available memory,
|
||||
* except when CONFIG_PCI_P2PDMA is enabled. P2PDMA exposes the
|
||||
* device BAR space assuming the direct map space is large enough
|
||||
* for creating a ZONE_DEVICE mapping in the direct map corresponding
|
||||
* to the physical BAR address.
|
||||
*/
|
||||
if (!IS_ENABLED(CONFIG_PCI_P2PDMA) && (memory_tb < kaslr_regions[0].size_tb))
|
||||
kaslr_regions[0].size_tb = memory_tb;
|
||||
|
||||
/*
|
||||
|
|
|
@ -26,7 +26,6 @@ void get_regs_from_mc(struct uml_pt_regs *regs, mcontext_t *mc)
|
|||
COPY(RIP);
|
||||
COPY2(EFLAGS, EFL);
|
||||
COPY2(CS, CSGSFS);
|
||||
regs->gp[CS / sizeof(unsigned long)] &= 0xffff;
|
||||
regs->gp[CS / sizeof(unsigned long)] |= 3;
|
||||
regs->gp[SS / sizeof(unsigned long)] = mc->gregs[REG_CSGSFS] >> 48;
|
||||
#endif
|
||||
}
|
||||
|
|
|
@ -427,6 +427,7 @@ static int crypto_ahash_init_tfm(struct crypto_tfm *tfm)
|
|||
hash->setkey = ahash_nosetkey;
|
||||
|
||||
crypto_ahash_set_statesize(hash, alg->halg.statesize);
|
||||
crypto_ahash_set_reqsize(hash, alg->reqsize);
|
||||
|
||||
if (tfm->__crt_alg->cra_type != &crypto_ahash_type)
|
||||
return crypto_init_shash_ops_async(tfm);
|
||||
|
@ -599,6 +600,9 @@ static int ahash_prepare_alg(struct ahash_alg *alg)
|
|||
if (alg->halg.statesize == 0)
|
||||
return -EINVAL;
|
||||
|
||||
if (alg->reqsize && alg->reqsize < alg->halg.statesize)
|
||||
return -EINVAL;
|
||||
|
||||
err = hash_prepare_alg(&alg->halg);
|
||||
if (err)
|
||||
return err;
|
||||
|
|
|
@ -265,10 +265,6 @@ static int hash_accept(struct socket *sock, struct socket *newsock, int flags,
|
|||
goto out_free_state;
|
||||
|
||||
err = crypto_ahash_import(&ctx2->req, state);
|
||||
if (err) {
|
||||
sock_orphan(sk2);
|
||||
sock_put(sk2);
|
||||
}
|
||||
|
||||
out_free_state:
|
||||
kfree_sensitive(state);
|
||||
|
|
|
@ -55,7 +55,7 @@ static int __lzorle_compress(const u8 *src, unsigned int slen,
|
|||
size_t tmp_len = *dlen; /* size_t(ulong) <-> uint on 64 bit */
|
||||
int err;
|
||||
|
||||
err = lzorle1x_1_compress(src, slen, dst, &tmp_len, ctx);
|
||||
err = lzorle1x_1_compress_safe(src, slen, dst, &tmp_len, ctx);
|
||||
|
||||
if (err != LZO_E_OK)
|
||||
return -EINVAL;
|
||||
|
|
|
@ -55,7 +55,7 @@ static int __lzo_compress(const u8 *src, unsigned int slen,
|
|||
size_t tmp_len = *dlen; /* size_t(ulong) <-> uint on 64 bit */
|
||||
int err;
|
||||
|
||||
err = lzo1x_1_compress(src, slen, dst, &tmp_len, ctx);
|
||||
err = lzo1x_1_compress_safe(src, slen, dst, &tmp_len, ctx);
|
||||
|
||||
if (err != LZO_E_OK)
|
||||
return -EINVAL;
|
||||
|
|
|
@ -811,6 +811,7 @@ struct crypto_sync_skcipher *crypto_alloc_sync_skcipher(
|
|||
|
||||
/* Only sync algorithms allowed. */
|
||||
mask |= CRYPTO_ALG_ASYNC | CRYPTO_ALG_SKCIPHER_REQSIZE_LARGE;
|
||||
type &= ~(CRYPTO_ALG_ASYNC | CRYPTO_ALG_SKCIPHER_REQSIZE_LARGE);
|
||||
|
||||
tfm = crypto_alloc_tfm(alg_name, &crypto_skcipher_type, type, mask);
|
||||
|
||||
|
|
|
@ -400,7 +400,7 @@ static int init_pci(struct qaic_device *qdev, struct pci_dev *pdev)
|
|||
int bars;
|
||||
int ret;
|
||||
|
||||
bars = pci_select_bars(pdev, IORESOURCE_MEM);
|
||||
bars = pci_select_bars(pdev, IORESOURCE_MEM) & 0x3f;
|
||||
|
||||
/* make sure the device has the expected BARs */
|
||||
if (bars != (BIT(0) | BIT(2) | BIT(4))) {
|
||||
|
|
|
@ -438,7 +438,7 @@ config ACPI_SBS
|
|||
the modules will be called sbs and sbshc.
|
||||
|
||||
config ACPI_HED
|
||||
tristate "Hardware Error Device"
|
||||
bool "Hardware Error Device"
|
||||
help
|
||||
This driver supports the Hardware Error Device (PNP0C33),
|
||||
which is used to report some hardware errors notified via
|
||||
|
|
|
@ -355,8 +355,10 @@ static bool acpi_pnp_match(const char *idstr, const struct acpi_device_id **matc
|
|||
* device represented by it.
|
||||
*/
|
||||
static const struct acpi_device_id acpi_nonpnp_device_ids[] = {
|
||||
{"INT3F0D"},
|
||||
{"INTC1080"},
|
||||
{"INTC1081"},
|
||||
{"INTC1099"},
|
||||
{""},
|
||||
};
|
||||
|
||||
|
|
|
@ -80,7 +80,12 @@ static struct acpi_driver acpi_hed_driver = {
|
|||
.remove = acpi_hed_remove,
|
||||
},
|
||||
};
|
||||
module_acpi_driver(acpi_hed_driver);
|
||||
|
||||
static int __init acpi_hed_driver_init(void)
|
||||
{
|
||||
return acpi_bus_register_driver(&acpi_hed_driver);
|
||||
}
|
||||
subsys_initcall(acpi_hed_driver_init);
|
||||
|
||||
MODULE_AUTHOR("Huang Ying");
|
||||
MODULE_DESCRIPTION("ACPI Hardware Error Device Driver");
|
||||
|
|
|
@ -594,18 +594,19 @@ static int charlcd_init(struct charlcd *lcd)
|
|||
return 0;
|
||||
}
|
||||
|
||||
struct charlcd *charlcd_alloc(void)
|
||||
struct charlcd *charlcd_alloc(unsigned int drvdata_size)
|
||||
{
|
||||
struct charlcd_priv *priv;
|
||||
struct charlcd *lcd;
|
||||
|
||||
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
|
||||
priv = kzalloc(sizeof(*priv) + drvdata_size, GFP_KERNEL);
|
||||
if (!priv)
|
||||
return NULL;
|
||||
|
||||
priv->esc_seq.len = -1;
|
||||
|
||||
lcd = &priv->lcd;
|
||||
lcd->drvdata = priv->drvdata;
|
||||
|
||||
return lcd;
|
||||
}
|
||||
|
|
|
@ -49,7 +49,7 @@ struct charlcd {
|
|||
unsigned long y;
|
||||
} addr;
|
||||
|
||||
void *drvdata;
|
||||
void *drvdata; /* Set by charlcd_alloc() */
|
||||
};
|
||||
|
||||
/**
|
||||
|
@ -93,7 +93,8 @@ struct charlcd_ops {
|
|||
};
|
||||
|
||||
void charlcd_backlight(struct charlcd *lcd, enum charlcd_onoff on);
|
||||
struct charlcd *charlcd_alloc(void);
|
||||
|
||||
struct charlcd *charlcd_alloc(unsigned int drvdata_size);
|
||||
void charlcd_free(struct charlcd *lcd);
|
||||
|
||||
int charlcd_register(struct charlcd *lcd);
|
||||
|
|
|
@ -226,7 +226,7 @@ static int hd44780_probe(struct platform_device *pdev)
|
|||
if (!hdc)
|
||||
return -ENOMEM;
|
||||
|
||||
lcd = charlcd_alloc();
|
||||
lcd = charlcd_alloc(0);
|
||||
if (!lcd)
|
||||
goto fail1;
|
||||
|
||||
|
|
|
@ -307,7 +307,7 @@ static int lcd2s_i2c_probe(struct i2c_client *i2c)
|
|||
if (err < 0)
|
||||
return err;
|
||||
|
||||
lcd = charlcd_alloc();
|
||||
lcd = charlcd_alloc(0);
|
||||
if (!lcd)
|
||||
return -ENOMEM;
|
||||
|
||||
|
|
|
@ -835,7 +835,7 @@ static void lcd_init(void)
|
|||
if (!hdc)
|
||||
return;
|
||||
|
||||
charlcd = charlcd_alloc();
|
||||
charlcd = charlcd_alloc(0);
|
||||
if (!charlcd) {
|
||||
kfree(hdc);
|
||||
return;
|
||||
|
|
|
@ -3525,9 +3525,8 @@ static void btusb_coredump_qca(struct hci_dev *hdev)
|
|||
static int handle_dump_pkt_qca(struct hci_dev *hdev, struct sk_buff *skb)
|
||||
{
|
||||
int ret = 0;
|
||||
unsigned int skip = 0;
|
||||
u8 pkt_type;
|
||||
u8 *sk_ptr;
|
||||
unsigned int sk_len;
|
||||
u16 seqno;
|
||||
u32 dump_size;
|
||||
|
||||
|
@ -3536,18 +3535,13 @@ static int handle_dump_pkt_qca(struct hci_dev *hdev, struct sk_buff *skb)
|
|||
struct usb_device *udev = btdata->udev;
|
||||
|
||||
pkt_type = hci_skb_pkt_type(skb);
|
||||
sk_ptr = skb->data;
|
||||
sk_len = skb->len;
|
||||
skip = sizeof(struct hci_event_hdr);
|
||||
if (pkt_type == HCI_ACLDATA_PKT)
|
||||
skip += sizeof(struct hci_acl_hdr);
|
||||
|
||||
if (pkt_type == HCI_ACLDATA_PKT) {
|
||||
sk_ptr += HCI_ACL_HDR_SIZE;
|
||||
sk_len -= HCI_ACL_HDR_SIZE;
|
||||
}
|
||||
skb_pull(skb, skip);
|
||||
dump_hdr = (struct qca_dump_hdr *)skb->data;
|
||||
|
||||
sk_ptr += HCI_EVENT_HDR_SIZE;
|
||||
sk_len -= HCI_EVENT_HDR_SIZE;
|
||||
|
||||
dump_hdr = (struct qca_dump_hdr *)sk_ptr;
|
||||
seqno = le16_to_cpu(dump_hdr->seqno);
|
||||
if (seqno == 0) {
|
||||
set_bit(BTUSB_HW_SSR_ACTIVE, &btdata->flags);
|
||||
|
@ -3567,16 +3561,15 @@ static int handle_dump_pkt_qca(struct hci_dev *hdev, struct sk_buff *skb)
|
|||
|
||||
btdata->qca_dump.ram_dump_size = dump_size;
|
||||
btdata->qca_dump.ram_dump_seqno = 0;
|
||||
sk_ptr += offsetof(struct qca_dump_hdr, data0);
|
||||
sk_len -= offsetof(struct qca_dump_hdr, data0);
|
||||
|
||||
skb_pull(skb, offsetof(struct qca_dump_hdr, data0));
|
||||
|
||||
usb_disable_autosuspend(udev);
|
||||
bt_dev_info(hdev, "%s memdump size(%u)\n",
|
||||
(pkt_type == HCI_ACLDATA_PKT) ? "ACL" : "event",
|
||||
dump_size);
|
||||
} else {
|
||||
sk_ptr += offsetof(struct qca_dump_hdr, data);
|
||||
sk_len -= offsetof(struct qca_dump_hdr, data);
|
||||
skb_pull(skb, offsetof(struct qca_dump_hdr, data));
|
||||
}
|
||||
|
||||
if (!btdata->qca_dump.ram_dump_size) {
|
||||
|
@ -3596,7 +3589,6 @@ static int handle_dump_pkt_qca(struct hci_dev *hdev, struct sk_buff *skb)
|
|||
return ret;
|
||||
}
|
||||
|
||||
skb_pull(skb, skb->len - sk_len);
|
||||
hci_devcd_append(hdev, skb);
|
||||
btdata->qca_dump.ram_dump_seqno++;
|
||||
if (seqno == QCA_LAST_SEQUENCE_NUM) {
|
||||
|
@ -3624,68 +3616,58 @@ out:
|
|||
/* Return: true if the ACL packet is a dump packet, false otherwise. */
|
||||
static bool acl_pkt_is_dump_qca(struct hci_dev *hdev, struct sk_buff *skb)
|
||||
{
|
||||
u8 *sk_ptr;
|
||||
unsigned int sk_len;
|
||||
|
||||
struct hci_event_hdr *event_hdr;
|
||||
struct hci_acl_hdr *acl_hdr;
|
||||
struct qca_dump_hdr *dump_hdr;
|
||||
struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC);
|
||||
bool is_dump = false;
|
||||
|
||||
sk_ptr = skb->data;
|
||||
sk_len = skb->len;
|
||||
|
||||
acl_hdr = hci_acl_hdr(skb);
|
||||
if (le16_to_cpu(acl_hdr->handle) != QCA_MEMDUMP_ACL_HANDLE)
|
||||
if (!clone)
|
||||
return false;
|
||||
|
||||
sk_ptr += HCI_ACL_HDR_SIZE;
|
||||
sk_len -= HCI_ACL_HDR_SIZE;
|
||||
event_hdr = (struct hci_event_hdr *)sk_ptr;
|
||||
acl_hdr = skb_pull_data(clone, sizeof(*acl_hdr));
|
||||
if (!acl_hdr || (le16_to_cpu(acl_hdr->handle) != QCA_MEMDUMP_ACL_HANDLE))
|
||||
goto out;
|
||||
|
||||
if ((event_hdr->evt != HCI_VENDOR_PKT) ||
|
||||
(event_hdr->plen != (sk_len - HCI_EVENT_HDR_SIZE)))
|
||||
return false;
|
||||
event_hdr = skb_pull_data(clone, sizeof(*event_hdr));
|
||||
if (!event_hdr || (event_hdr->evt != HCI_VENDOR_PKT))
|
||||
goto out;
|
||||
|
||||
sk_ptr += HCI_EVENT_HDR_SIZE;
|
||||
sk_len -= HCI_EVENT_HDR_SIZE;
|
||||
dump_hdr = skb_pull_data(clone, sizeof(*dump_hdr));
|
||||
if (!dump_hdr || (dump_hdr->vse_class != QCA_MEMDUMP_VSE_CLASS) ||
|
||||
(dump_hdr->msg_type != QCA_MEMDUMP_MSG_TYPE))
|
||||
goto out;
|
||||
|
||||
dump_hdr = (struct qca_dump_hdr *)sk_ptr;
|
||||
if ((sk_len < offsetof(struct qca_dump_hdr, data)) ||
|
||||
(dump_hdr->vse_class != QCA_MEMDUMP_VSE_CLASS) ||
|
||||
(dump_hdr->msg_type != QCA_MEMDUMP_MSG_TYPE))
|
||||
return false;
|
||||
|
||||
return true;
|
||||
is_dump = true;
|
||||
out:
|
||||
consume_skb(clone);
|
||||
return is_dump;
|
||||
}
|
||||
|
||||
/* Return: true if the event packet is a dump packet, false otherwise. */
|
||||
static bool evt_pkt_is_dump_qca(struct hci_dev *hdev, struct sk_buff *skb)
|
||||
{
|
||||
u8 *sk_ptr;
|
||||
unsigned int sk_len;
|
||||
|
||||
struct hci_event_hdr *event_hdr;
|
||||
struct qca_dump_hdr *dump_hdr;
|
||||
struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC);
|
||||
bool is_dump = false;
|
||||
|
||||
sk_ptr = skb->data;
|
||||
sk_len = skb->len;
|
||||
|
||||
event_hdr = hci_event_hdr(skb);
|
||||
|
||||
if ((event_hdr->evt != HCI_VENDOR_PKT)
|
||||
|| (event_hdr->plen != (sk_len - HCI_EVENT_HDR_SIZE)))
|
||||
if (!clone)
|
||||
return false;
|
||||
|
||||
sk_ptr += HCI_EVENT_HDR_SIZE;
|
||||
sk_len -= HCI_EVENT_HDR_SIZE;
|
||||
event_hdr = skb_pull_data(clone, sizeof(*event_hdr));
|
||||
if (!event_hdr || (event_hdr->evt != HCI_VENDOR_PKT))
|
||||
goto out;
|
||||
|
||||
dump_hdr = (struct qca_dump_hdr *)sk_ptr;
|
||||
if ((sk_len < offsetof(struct qca_dump_hdr, data)) ||
|
||||
(dump_hdr->vse_class != QCA_MEMDUMP_VSE_CLASS) ||
|
||||
(dump_hdr->msg_type != QCA_MEMDUMP_MSG_TYPE))
|
||||
return false;
|
||||
dump_hdr = skb_pull_data(clone, sizeof(*dump_hdr));
|
||||
if (!dump_hdr || (dump_hdr->vse_class != QCA_MEMDUMP_VSE_CLASS) ||
|
||||
(dump_hdr->msg_type != QCA_MEMDUMP_MSG_TYPE))
|
||||
goto out;
|
||||
|
||||
return true;
|
||||
is_dump = true;
|
||||
out:
|
||||
consume_skb(clone);
|
||||
return is_dump;
|
||||
}
|
||||
|
||||
static int btusb_recv_acl_qca(struct hci_dev *hdev, struct sk_buff *skb)
|
||||
|
|
|
@@ -137,6 +137,8 @@ static int s2mps11_clk_probe(struct platform_device *pdev)
if (!clk_data)
return -ENOMEM;

clk_data->num = S2MPS11_CLKS_NUM;

switch (hwid) {
case S2MPS11X:
s2mps11_reg = S2MPS11_REG_RTC_CTRL;
@@ -186,7 +188,6 @@ static int s2mps11_clk_probe(struct platform_device *pdev)
clk_data->hws[i] = &s2mps11_clks[i].hw;
}

clk_data->num = S2MPS11_CLKS_NUM;
of_clk_add_hw_provider(s2mps11_clks->clk_np, of_clk_hw_onecell_get,
clk_data);

@ -8,6 +8,7 @@
|
|||
#include <linux/err.h>
|
||||
#include <linux/io.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/units.h>
|
||||
#include <linux/of_address.h>
|
||||
#include <linux/platform_device.h>
|
||||
#include <linux/slab.h>
|
||||
|
@ -406,11 +407,151 @@ static const char * const imx8mp_clkout_sels[] = {"audio_pll1_out", "audio_pll2_
|
|||
static struct clk_hw **hws;
|
||||
static struct clk_hw_onecell_data *clk_hw_data;
|
||||
|
||||
struct imx8mp_clock_constraints {
|
||||
unsigned int clkid;
|
||||
u32 maxrate;
|
||||
};
|
||||
|
||||
/*
|
||||
* Below tables are taken from IMX8MPCEC Rev. 2.1, 07/2023
|
||||
* Table 13. Maximum frequency of modules.
|
||||
* Probable typos fixed are marked with a comment.
|
||||
*/
|
||||
static const struct imx8mp_clock_constraints imx8mp_clock_common_constraints[] = {
|
||||
{ IMX8MP_CLK_A53_DIV, 1000 * HZ_PER_MHZ },
|
||||
{ IMX8MP_CLK_ENET_AXI, 266666667 }, /* Datasheet claims 266MHz */
|
||||
{ IMX8MP_CLK_NAND_USDHC_BUS, 266666667 }, /* Datasheet claims 266MHz */
|
||||
{ IMX8MP_CLK_MEDIA_APB, 200 * HZ_PER_MHZ },
|
||||
{ IMX8MP_CLK_HDMI_APB, 133333333 }, /* Datasheet claims 133MHz */
|
||||
{ IMX8MP_CLK_ML_AXI, 800 * HZ_PER_MHZ },
|
||||
{ IMX8MP_CLK_AHB, 133333333 },
|
||||
{ IMX8MP_CLK_IPG_ROOT, 66666667 },
|
||||
{ IMX8MP_CLK_AUDIO_AHB, 400 * HZ_PER_MHZ },
|
||||
{ IMX8MP_CLK_MEDIA_DISP2_PIX, 170 * HZ_PER_MHZ },
|
||||
{ IMX8MP_CLK_DRAM_ALT, 666666667 },
|
||||
{ IMX8MP_CLK_DRAM_APB, 200 * HZ_PER_MHZ },
|
||||
{ IMX8MP_CLK_CAN1, 80 * HZ_PER_MHZ },
|
||||
{ IMX8MP_CLK_CAN2, 80 * HZ_PER_MHZ },
|
||||
{ IMX8MP_CLK_PCIE_AUX, 10 * HZ_PER_MHZ },
|
||||
{ IMX8MP_CLK_I2C5, 66666667 }, /* Datasheet claims 66MHz */
|
||||
{ IMX8MP_CLK_I2C6, 66666667 }, /* Datasheet claims 66MHz */
|
||||
{ IMX8MP_CLK_SAI1, 66666667 }, /* Datasheet claims 66MHz */
|
||||
{ IMX8MP_CLK_SAI2, 66666667 }, /* Datasheet claims 66MHz */
|
||||
{ IMX8MP_CLK_SAI3, 66666667 }, /* Datasheet claims 66MHz */
|
||||
{ IMX8MP_CLK_SAI5, 66666667 }, /* Datasheet claims 66MHz */
|
||||
{ IMX8MP_CLK_SAI6, 66666667 }, /* Datasheet claims 66MHz */
|
||||
{ IMX8MP_CLK_ENET_QOS, 125 * HZ_PER_MHZ },
|
||||
{ IMX8MP_CLK_ENET_QOS_TIMER, 200 * HZ_PER_MHZ },
|
||||
{ IMX8MP_CLK_ENET_REF, 125 * HZ_PER_MHZ },
|
||||
{ IMX8MP_CLK_ENET_TIMER, 125 * HZ_PER_MHZ },
|
||||
{ IMX8MP_CLK_ENET_PHY_REF, 125 * HZ_PER_MHZ },
|
||||
{ IMX8MP_CLK_NAND, 500 * HZ_PER_MHZ },
|
||||
{ IMX8MP_CLK_QSPI, 400 * HZ_PER_MHZ },
|
||||
{ IMX8MP_CLK_USDHC1, 400 * HZ_PER_MHZ },
|
||||
{ IMX8MP_CLK_USDHC2, 400 * HZ_PER_MHZ },
|
||||
{ IMX8MP_CLK_I2C1, 66666667 }, /* Datasheet claims 66MHz */
|
||||
{ IMX8MP_CLK_I2C2, 66666667 }, /* Datasheet claims 66MHz */
|
||||
{ IMX8MP_CLK_I2C3, 66666667 }, /* Datasheet claims 66MHz */
|
||||
{ IMX8MP_CLK_I2C4, 66666667 }, /* Datasheet claims 66MHz */
|
||||
{ IMX8MP_CLK_UART1, 80 * HZ_PER_MHZ },
|
||||
{ IMX8MP_CLK_UART2, 80 * HZ_PER_MHZ },
|
||||
{ IMX8MP_CLK_UART3, 80 * HZ_PER_MHZ },
|
||||
{ IMX8MP_CLK_UART4, 80 * HZ_PER_MHZ },
|
||||
{ IMX8MP_CLK_ECSPI1, 80 * HZ_PER_MHZ },
|
||||
{ IMX8MP_CLK_ECSPI2, 80 * HZ_PER_MHZ },
|
||||
{ IMX8MP_CLK_PWM1, 66666667 }, /* Datasheet claims 66MHz */
|
||||
{ IMX8MP_CLK_PWM2, 66666667 }, /* Datasheet claims 66MHz */
|
||||
{ IMX8MP_CLK_PWM3, 66666667 }, /* Datasheet claims 66MHz */
|
||||
{ IMX8MP_CLK_PWM4, 66666667 }, /* Datasheet claims 66MHz */
|
||||
{ IMX8MP_CLK_GPT1, 100 * HZ_PER_MHZ },
|
||||
{ IMX8MP_CLK_GPT2, 100 * HZ_PER_MHZ },
|
||||
{ IMX8MP_CLK_GPT3, 100 * HZ_PER_MHZ },
|
||||
{ IMX8MP_CLK_GPT4, 100 * HZ_PER_MHZ },
|
||||
{ IMX8MP_CLK_GPT5, 100 * HZ_PER_MHZ },
|
||||
{ IMX8MP_CLK_GPT6, 100 * HZ_PER_MHZ },
|
||||
{ IMX8MP_CLK_WDOG, 66666667 }, /* Datasheet claims 66MHz */
|
||||
{ IMX8MP_CLK_IPP_DO_CLKO1, 200 * HZ_PER_MHZ },
|
||||
{ IMX8MP_CLK_IPP_DO_CLKO2, 200 * HZ_PER_MHZ },
|
||||
{ IMX8MP_CLK_HDMI_REF_266M, 266 * HZ_PER_MHZ },
|
||||
{ IMX8MP_CLK_USDHC3, 400 * HZ_PER_MHZ },
|
||||
{ IMX8MP_CLK_MEDIA_MIPI_PHY1_REF, 300 * HZ_PER_MHZ },
|
||||
{ IMX8MP_CLK_MEDIA_DISP1_PIX, 250 * HZ_PER_MHZ },
|
||||
{ IMX8MP_CLK_MEDIA_CAM2_PIX, 277 * HZ_PER_MHZ },
|
||||
{ IMX8MP_CLK_MEDIA_LDB, 595 * HZ_PER_MHZ },
|
||||
{ IMX8MP_CLK_MEDIA_MIPI_TEST_BYTE, 200 * HZ_PER_MHZ },
|
||||
{ IMX8MP_CLK_ECSPI3, 80 * HZ_PER_MHZ },
|
||||
{ IMX8MP_CLK_PDM, 200 * HZ_PER_MHZ },
|
||||
{ IMX8MP_CLK_SAI7, 66666667 }, /* Datasheet claims 66MHz */
|
||||
{ IMX8MP_CLK_MAIN_AXI, 400 * HZ_PER_MHZ },
|
||||
{ /* Sentinel */ }
|
||||
};
|
||||
|
||||
static const struct imx8mp_clock_constraints imx8mp_clock_nominal_constraints[] = {
|
||||
{ IMX8MP_CLK_M7_CORE, 600 * HZ_PER_MHZ },
|
||||
{ IMX8MP_CLK_ML_CORE, 800 * HZ_PER_MHZ },
|
||||
{ IMX8MP_CLK_GPU3D_CORE, 800 * HZ_PER_MHZ },
|
||||
{ IMX8MP_CLK_GPU3D_SHADER_CORE, 800 * HZ_PER_MHZ },
|
||||
{ IMX8MP_CLK_GPU2D_CORE, 800 * HZ_PER_MHZ },
|
||||
{ IMX8MP_CLK_AUDIO_AXI_SRC, 600 * HZ_PER_MHZ },
|
||||
{ IMX8MP_CLK_HSIO_AXI, 400 * HZ_PER_MHZ },
|
||||
{ IMX8MP_CLK_MEDIA_ISP, 400 * HZ_PER_MHZ },
|
||||
{ IMX8MP_CLK_VPU_BUS, 600 * HZ_PER_MHZ },
|
||||
{ IMX8MP_CLK_MEDIA_AXI, 400 * HZ_PER_MHZ },
|
||||
{ IMX8MP_CLK_HDMI_AXI, 400 * HZ_PER_MHZ },
|
||||
{ IMX8MP_CLK_GPU_AXI, 600 * HZ_PER_MHZ },
|
||||
{ IMX8MP_CLK_GPU_AHB, 300 * HZ_PER_MHZ },
|
||||
{ IMX8MP_CLK_NOC, 800 * HZ_PER_MHZ },
|
||||
{ IMX8MP_CLK_NOC_IO, 600 * HZ_PER_MHZ },
|
||||
{ IMX8MP_CLK_ML_AHB, 300 * HZ_PER_MHZ },
|
||||
{ IMX8MP_CLK_VPU_G1, 600 * HZ_PER_MHZ },
|
||||
{ IMX8MP_CLK_VPU_G2, 500 * HZ_PER_MHZ },
|
||||
{ IMX8MP_CLK_MEDIA_CAM1_PIX, 400 * HZ_PER_MHZ },
|
||||
{ IMX8MP_CLK_VPU_VC8000E, 400 * HZ_PER_MHZ }, /* Datasheet claims 500MHz */
|
||||
{ IMX8MP_CLK_DRAM_CORE, 800 * HZ_PER_MHZ },
|
||||
{ IMX8MP_CLK_GIC, 400 * HZ_PER_MHZ },
|
||||
{ /* Sentinel */ }
|
||||
};
|
||||
|
||||
static const struct imx8mp_clock_constraints imx8mp_clock_overdrive_constraints[] = {
|
||||
{ IMX8MP_CLK_M7_CORE, 800 * HZ_PER_MHZ},
|
||||
{ IMX8MP_CLK_ML_CORE, 1000 * HZ_PER_MHZ },
|
||||
{ IMX8MP_CLK_GPU3D_CORE, 1000 * HZ_PER_MHZ },
|
||||
{ IMX8MP_CLK_GPU3D_SHADER_CORE, 1000 * HZ_PER_MHZ },
|
||||
{ IMX8MP_CLK_GPU2D_CORE, 1000 * HZ_PER_MHZ },
|
||||
{ IMX8MP_CLK_AUDIO_AXI_SRC, 800 * HZ_PER_MHZ },
|
||||
{ IMX8MP_CLK_HSIO_AXI, 500 * HZ_PER_MHZ },
|
||||
{ IMX8MP_CLK_MEDIA_ISP, 500 * HZ_PER_MHZ },
|
||||
{ IMX8MP_CLK_VPU_BUS, 800 * HZ_PER_MHZ },
|
||||
{ IMX8MP_CLK_MEDIA_AXI, 500 * HZ_PER_MHZ },
|
||||
{ IMX8MP_CLK_HDMI_AXI, 500 * HZ_PER_MHZ },
|
||||
{ IMX8MP_CLK_GPU_AXI, 800 * HZ_PER_MHZ },
|
||||
{ IMX8MP_CLK_GPU_AHB, 400 * HZ_PER_MHZ },
|
||||
{ IMX8MP_CLK_NOC, 1000 * HZ_PER_MHZ },
|
||||
{ IMX8MP_CLK_NOC_IO, 800 * HZ_PER_MHZ },
|
||||
{ IMX8MP_CLK_ML_AHB, 400 * HZ_PER_MHZ },
|
||||
{ IMX8MP_CLK_VPU_G1, 800 * HZ_PER_MHZ },
|
||||
{ IMX8MP_CLK_VPU_G2, 700 * HZ_PER_MHZ },
|
||||
{ IMX8MP_CLK_MEDIA_CAM1_PIX, 500 * HZ_PER_MHZ },
|
||||
{ IMX8MP_CLK_VPU_VC8000E, 500 * HZ_PER_MHZ }, /* Datasheet claims 400MHz */
|
||||
{ IMX8MP_CLK_DRAM_CORE, 1000 * HZ_PER_MHZ },
|
||||
{ IMX8MP_CLK_GIC, 500 * HZ_PER_MHZ },
|
||||
{ /* Sentinel */ }
|
||||
};
|
||||
|
||||
static void imx8mp_clocks_apply_constraints(const struct imx8mp_clock_constraints constraints[])
|
||||
{
|
||||
const struct imx8mp_clock_constraints *constr;
|
||||
|
||||
for (constr = constraints; constr->clkid; constr++)
|
||||
clk_hw_set_rate_range(hws[constr->clkid], 0, constr->maxrate);
|
||||
}
|
||||
|
||||
static int imx8mp_clocks_probe(struct platform_device *pdev)
|
||||
{
|
||||
struct device *dev = &pdev->dev;
|
||||
struct device_node *np;
|
||||
void __iomem *anatop_base, *ccm_base;
|
||||
const char *opmode;
|
||||
int err;
|
||||
|
||||
np = of_find_compatible_node(NULL, NULL, "fsl,imx8mp-anatop");
|
||||
|
@ -715,6 +856,16 @@ static int imx8mp_clocks_probe(struct platform_device *pdev)
|
|||
|
||||
imx_check_clk_hws(hws, IMX8MP_CLK_END);
|
||||
|
||||
imx8mp_clocks_apply_constraints(imx8mp_clock_common_constraints);
|
||||
|
||||
err = of_property_read_string(np, "fsl,operating-mode", &opmode);
|
||||
if (!err) {
|
||||
if (!strcmp(opmode, "nominal"))
|
||||
imx8mp_clocks_apply_constraints(imx8mp_clock_nominal_constraints);
|
||||
else if (!strcmp(opmode, "overdrive"))
|
||||
imx8mp_clocks_apply_constraints(imx8mp_clock_overdrive_constraints);
|
||||
}
|
||||
|
||||
err = of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_hw_data);
|
||||
if (err < 0) {
|
||||
dev_err(dev, "failed to register hws for i.MX8MP\n");
|
||||
|
|
|
@@ -148,7 +148,7 @@ config IPQ_GCC_4019

config IPQ_GCC_5018
tristate "IPQ5018 Global Clock Controller"
depends on ARM64 || COMPILE_TEST
depends on ARM || ARM64 || COMPILE_TEST
help
Support for global clock controller on ipq5018 devices.
Say Y if you want to use peripheral devices such as UART, SPI,

@ -411,7 +411,7 @@ static struct clk_rcg2 cam_cc_bps_clk_src = {
|
|||
.parent_data = cam_cc_parent_data_0,
|
||||
.num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
|
||||
.flags = CLK_SET_RATE_PARENT,
|
||||
.ops = &clk_rcg2_ops,
|
||||
.ops = &clk_rcg2_shared_ops,
|
||||
},
|
||||
};
|
||||
|
||||
|
@ -433,7 +433,7 @@ static struct clk_rcg2 cam_cc_camnoc_axi_clk_src = {
|
|||
.parent_data = cam_cc_parent_data_0,
|
||||
.num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
|
||||
.flags = CLK_SET_RATE_PARENT,
|
||||
.ops = &clk_rcg2_ops,
|
||||
.ops = &clk_rcg2_shared_ops,
|
||||
},
|
||||
};
|
||||
|
||||
|
@ -454,7 +454,7 @@ static struct clk_rcg2 cam_cc_cci_0_clk_src = {
|
|||
.parent_data = cam_cc_parent_data_0,
|
||||
.num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
|
||||
.flags = CLK_SET_RATE_PARENT,
|
||||
.ops = &clk_rcg2_ops,
|
||||
.ops = &clk_rcg2_shared_ops,
|
||||
},
|
||||
};
|
||||
|
||||
|
@ -469,7 +469,7 @@ static struct clk_rcg2 cam_cc_cci_1_clk_src = {
|
|||
.parent_data = cam_cc_parent_data_0,
|
||||
.num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
|
||||
.flags = CLK_SET_RATE_PARENT,
|
||||
.ops = &clk_rcg2_ops,
|
||||
.ops = &clk_rcg2_shared_ops,
|
||||
},
|
||||
};
|
||||
|
||||
|
@ -490,7 +490,7 @@ static struct clk_rcg2 cam_cc_cphy_rx_clk_src = {
|
|||
.parent_data = cam_cc_parent_data_0,
|
||||
.num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
|
||||
.flags = CLK_SET_RATE_PARENT,
|
||||
.ops = &clk_rcg2_ops,
|
||||
.ops = &clk_rcg2_shared_ops,
|
||||
},
|
||||
};
|
||||
|
||||
|
@ -511,7 +511,7 @@ static struct clk_rcg2 cam_cc_csi0phytimer_clk_src = {
|
|||
.parent_data = cam_cc_parent_data_0,
|
||||
.num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
|
||||
.flags = CLK_SET_RATE_PARENT,
|
||||
.ops = &clk_rcg2_ops,
|
||||
.ops = &clk_rcg2_shared_ops,
|
||||
},
|
||||
};
|
||||
|
||||
|
@ -526,7 +526,7 @@ static struct clk_rcg2 cam_cc_csi1phytimer_clk_src = {
|
|||
.parent_data = cam_cc_parent_data_0,
|
||||
.num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
|
||||
.flags = CLK_SET_RATE_PARENT,
|
||||
.ops = &clk_rcg2_ops,
|
||||
.ops = &clk_rcg2_shared_ops,
|
||||
},
|
||||
};
|
||||
|
||||
|
@ -556,7 +556,7 @@ static struct clk_rcg2 cam_cc_csi3phytimer_clk_src = {
|
|||
.parent_data = cam_cc_parent_data_0,
|
||||
.num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
|
||||
.flags = CLK_SET_RATE_PARENT,
|
||||
.ops = &clk_rcg2_ops,
|
||||
.ops = &clk_rcg2_shared_ops,
|
||||
},
|
||||
};
|
||||
|
||||
|
@ -571,7 +571,7 @@ static struct clk_rcg2 cam_cc_csi4phytimer_clk_src = {
|
|||
.parent_data = cam_cc_parent_data_0,
|
||||
.num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
|
||||
.flags = CLK_SET_RATE_PARENT,
|
||||
.ops = &clk_rcg2_ops,
|
||||
.ops = &clk_rcg2_shared_ops,
|
||||
},
|
||||
};
|
||||
|
||||
|
@ -586,7 +586,7 @@ static struct clk_rcg2 cam_cc_csi5phytimer_clk_src = {
|
|||
.parent_data = cam_cc_parent_data_0,
|
||||
.num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
|
||||
.flags = CLK_SET_RATE_PARENT,
|
||||
.ops = &clk_rcg2_ops,
|
||||
.ops = &clk_rcg2_shared_ops,
|
||||
},
|
||||
};
|
||||
|
||||
|
@ -611,7 +611,7 @@ static struct clk_rcg2 cam_cc_fast_ahb_clk_src = {
|
|||
.parent_data = cam_cc_parent_data_0,
|
||||
.num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
|
||||
.flags = CLK_SET_RATE_PARENT,
|
||||
.ops = &clk_rcg2_ops,
|
||||
.ops = &clk_rcg2_shared_ops,
|
||||
},
|
||||
};
|
||||
|
||||
|
@ -634,7 +634,7 @@ static struct clk_rcg2 cam_cc_fd_core_clk_src = {
|
|||
.parent_data = cam_cc_parent_data_0,
|
||||
.num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
|
||||
.flags = CLK_SET_RATE_PARENT,
|
||||
.ops = &clk_rcg2_ops,
|
||||
.ops = &clk_rcg2_shared_ops,
|
||||
},
|
||||
};
|
||||
|
||||
|
@ -649,7 +649,7 @@ static struct clk_rcg2 cam_cc_icp_clk_src = {
|
|||
.parent_data = cam_cc_parent_data_0,
|
||||
.num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
|
||||
.flags = CLK_SET_RATE_PARENT,
|
||||
.ops = &clk_rcg2_ops,
|
||||
.ops = &clk_rcg2_shared_ops,
|
||||
},
|
||||
};
|
||||
|
||||
|
@ -673,7 +673,7 @@ static struct clk_rcg2 cam_cc_ife_0_clk_src = {
|
|||
.parent_data = cam_cc_parent_data_2,
|
||||
.num_parents = ARRAY_SIZE(cam_cc_parent_data_2),
|
||||
.flags = CLK_SET_RATE_PARENT,
|
||||
.ops = &clk_rcg2_ops,
|
||||
.ops = &clk_rcg2_shared_ops,
|
||||
},
|
||||
};
|
||||
|
||||
|
@ -710,7 +710,7 @@ static struct clk_rcg2 cam_cc_ife_0_csid_clk_src = {
|
|||
.parent_data = cam_cc_parent_data_0,
|
||||
.num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
|
||||
.flags = CLK_SET_RATE_PARENT,
|
||||
.ops = &clk_rcg2_ops,
|
||||
.ops = &clk_rcg2_shared_ops,
|
||||
},
|
||||
};
|
||||
|
||||
|
@ -734,7 +734,7 @@ static struct clk_rcg2 cam_cc_ife_1_clk_src = {
|
|||
.parent_data = cam_cc_parent_data_3,
|
||||
.num_parents = ARRAY_SIZE(cam_cc_parent_data_3),
|
||||
.flags = CLK_SET_RATE_PARENT,
|
||||
.ops = &clk_rcg2_ops,
|
||||
.ops = &clk_rcg2_shared_ops,
|
||||
},
|
||||
};
|
||||
|
||||
|
@ -749,7 +749,7 @@ static struct clk_rcg2 cam_cc_ife_1_csid_clk_src = {
|
|||
.parent_data = cam_cc_parent_data_0,
|
||||
.num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
|
||||
.flags = CLK_SET_RATE_PARENT,
|
||||
.ops = &clk_rcg2_ops,
|
||||
.ops = &clk_rcg2_shared_ops,
|
||||
},
|
||||
};
|
||||
|
||||
|
@ -771,7 +771,7 @@ static struct clk_rcg2 cam_cc_ife_lite_clk_src = {
|
|||
.parent_data = cam_cc_parent_data_0,
|
||||
.num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
|
||||
.flags = CLK_SET_RATE_PARENT,
|
||||
.ops = &clk_rcg2_ops,
|
||||
.ops = &clk_rcg2_shared_ops,
|
||||
},
|
||||
};
|
||||
|
||||
|
@ -786,7 +786,7 @@ static struct clk_rcg2 cam_cc_ife_lite_csid_clk_src = {
|
|||
.parent_data = cam_cc_parent_data_0,
|
||||
.num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
|
||||
.flags = CLK_SET_RATE_PARENT,
|
||||
.ops = &clk_rcg2_ops,
|
||||
.ops = &clk_rcg2_shared_ops,
|
||||
},
|
||||
};
|
||||
|
||||
|
@ -810,7 +810,7 @@ static struct clk_rcg2 cam_cc_ipe_0_clk_src = {
|
|||
.parent_data = cam_cc_parent_data_4,
|
||||
.num_parents = ARRAY_SIZE(cam_cc_parent_data_4),
|
||||
.flags = CLK_SET_RATE_PARENT,
|
||||
.ops = &clk_rcg2_ops,
|
||||
.ops = &clk_rcg2_shared_ops,
|
||||
},
|
||||
};
|
||||
|
||||
|
@ -825,7 +825,7 @@ static struct clk_rcg2 cam_cc_jpeg_clk_src = {
|
|||
.parent_data = cam_cc_parent_data_0,
|
||||
.num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
|
||||
.flags = CLK_SET_RATE_PARENT,
|
||||
.ops = &clk_rcg2_ops,
|
||||
.ops = &clk_rcg2_shared_ops,
|
||||
},
|
||||
};
|
||||
|
||||
|
@ -847,7 +847,7 @@ static struct clk_rcg2 cam_cc_mclk0_clk_src = {
|
|||
.parent_data = cam_cc_parent_data_1,
|
||||
.num_parents = ARRAY_SIZE(cam_cc_parent_data_1),
|
||||
.flags = CLK_SET_RATE_PARENT,
|
||||
.ops = &clk_rcg2_ops,
|
||||
.ops = &clk_rcg2_shared_ops,
|
||||
},
|
||||
};
|
||||
|
||||
|
@ -862,7 +862,7 @@ static struct clk_rcg2 cam_cc_mclk1_clk_src = {
|
|||
.parent_data = cam_cc_parent_data_1,
|
||||
.num_parents = ARRAY_SIZE(cam_cc_parent_data_1),
|
||||
.flags = CLK_SET_RATE_PARENT,
|
||||
.ops = &clk_rcg2_ops,
|
||||
.ops = &clk_rcg2_shared_ops,
|
||||
},
|
||||
};
|
||||
|
||||
|
@ -877,7 +877,7 @@ static struct clk_rcg2 cam_cc_mclk2_clk_src = {
|
|||
.parent_data = cam_cc_parent_data_1,
|
||||
.num_parents = ARRAY_SIZE(cam_cc_parent_data_1),
|
||||
.flags = CLK_SET_RATE_PARENT,
|
||||
.ops = &clk_rcg2_ops,
|
||||
.ops = &clk_rcg2_shared_ops,
|
||||
},
|
||||
};
|
||||
|
||||
|
@ -892,7 +892,7 @@ static struct clk_rcg2 cam_cc_mclk3_clk_src = {
|
|||
.parent_data = cam_cc_parent_data_1,
|
||||
.num_parents = ARRAY_SIZE(cam_cc_parent_data_1),
|
||||
.flags = CLK_SET_RATE_PARENT,
|
||||
.ops = &clk_rcg2_ops,
|
||||
.ops = &clk_rcg2_shared_ops,
|
||||
},
|
||||
};
|
||||
|
||||
|
@ -907,7 +907,7 @@ static struct clk_rcg2 cam_cc_mclk4_clk_src = {
|
|||
.parent_data = cam_cc_parent_data_1,
|
||||
.num_parents = ARRAY_SIZE(cam_cc_parent_data_1),
|
||||
.flags = CLK_SET_RATE_PARENT,
|
||||
.ops = &clk_rcg2_ops,
|
||||
.ops = &clk_rcg2_shared_ops,
|
||||
},
|
||||
};
|
||||
|
||||
|
@ -922,7 +922,7 @@ static struct clk_rcg2 cam_cc_mclk5_clk_src = {
|
|||
.parent_data = cam_cc_parent_data_1,
|
||||
.num_parents = ARRAY_SIZE(cam_cc_parent_data_1),
|
||||
.flags = CLK_SET_RATE_PARENT,
|
||||
.ops = &clk_rcg2_ops,
|
||||
.ops = &clk_rcg2_shared_ops,
|
||||
},
|
||||
};
|
||||
|
||||
|
@ -993,7 +993,7 @@ static struct clk_rcg2 cam_cc_slow_ahb_clk_src = {
|
|||
.parent_data = cam_cc_parent_data_0,
|
||||
.num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
|
||||
.flags = CLK_SET_RATE_PARENT,
|
||||
.ops = &clk_rcg2_ops,
|
||||
.ops = &clk_rcg2_shared_ops,
|
||||
},
|
||||
};
|
||||
|
||||
|
|
|
@ -645,14 +645,19 @@ clk_alpha_pll_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
|
|||
struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
|
||||
u32 alpha_width = pll_alpha_width(pll);
|
||||
|
||||
regmap_read(pll->clkr.regmap, PLL_L_VAL(pll), &l);
|
||||
if (regmap_read(pll->clkr.regmap, PLL_L_VAL(pll), &l))
|
||||
return 0;
|
||||
|
||||
if (regmap_read(pll->clkr.regmap, PLL_USER_CTL(pll), &ctl))
|
||||
return 0;
|
||||
|
||||
regmap_read(pll->clkr.regmap, PLL_USER_CTL(pll), &ctl);
|
||||
if (ctl & PLL_ALPHA_EN) {
|
||||
regmap_read(pll->clkr.regmap, PLL_ALPHA_VAL(pll), &low);
|
||||
if (regmap_read(pll->clkr.regmap, PLL_ALPHA_VAL(pll), &low))
|
||||
return 0;
|
||||
if (alpha_width > 32) {
|
||||
regmap_read(pll->clkr.regmap, PLL_ALPHA_VAL_U(pll),
|
||||
&high);
|
||||
if (regmap_read(pll->clkr.regmap, PLL_ALPHA_VAL_U(pll),
|
||||
&high))
|
||||
return 0;
|
||||
a = (u64)high << 32 | low;
|
||||
} else {
|
||||
a = low & GENMASK(alpha_width - 1, 0);
|
||||
|
@ -844,8 +849,11 @@ alpha_pll_huayra_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
|
|||
struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
|
||||
u32 l, alpha = 0, ctl, alpha_m, alpha_n;
|
||||
|
||||
regmap_read(pll->clkr.regmap, PLL_L_VAL(pll), &l);
|
||||
regmap_read(pll->clkr.regmap, PLL_USER_CTL(pll), &ctl);
|
||||
if (regmap_read(pll->clkr.regmap, PLL_L_VAL(pll), &l))
|
||||
return 0;
|
||||
|
||||
if (regmap_read(pll->clkr.regmap, PLL_USER_CTL(pll), &ctl))
|
||||
return 0;
|
||||
|
||||
if (ctl & PLL_ALPHA_EN) {
|
||||
regmap_read(pll->clkr.regmap, PLL_ALPHA_VAL(pll), &alpha);
|
||||
|
@ -1039,8 +1047,11 @@ clk_trion_pll_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
|
|||
struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
|
||||
u32 l, frac, alpha_width = pll_alpha_width(pll);
|
||||
|
||||
regmap_read(pll->clkr.regmap, PLL_L_VAL(pll), &l);
|
||||
regmap_read(pll->clkr.regmap, PLL_ALPHA_VAL(pll), &frac);
|
||||
if (regmap_read(pll->clkr.regmap, PLL_L_VAL(pll), &l))
|
||||
return 0;
|
||||
|
||||
if (regmap_read(pll->clkr.regmap, PLL_ALPHA_VAL(pll), &frac))
|
||||
return 0;
|
||||
|
||||
return alpha_pll_calc_rate(parent_rate, l, frac, alpha_width);
|
||||
}
|
||||
|
@ -1098,7 +1109,8 @@ clk_alpha_pll_postdiv_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
|
|||
struct clk_alpha_pll_postdiv *pll = to_clk_alpha_pll_postdiv(hw);
|
||||
u32 ctl;
|
||||
|
||||
regmap_read(pll->clkr.regmap, PLL_USER_CTL(pll), &ctl);
|
||||
if (regmap_read(pll->clkr.regmap, PLL_USER_CTL(pll), &ctl))
|
||||
return 0;
|
||||
|
||||
ctl >>= PLL_POST_DIV_SHIFT;
|
||||
ctl &= PLL_POST_DIV_MASK(pll);
|
||||
|
@ -1314,8 +1326,11 @@ static unsigned long alpha_pll_fabia_recalc_rate(struct clk_hw *hw,
|
|||
struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
|
||||
u32 l, frac, alpha_width = pll_alpha_width(pll);
|
||||
|
||||
regmap_read(pll->clkr.regmap, PLL_L_VAL(pll), &l);
|
||||
regmap_read(pll->clkr.regmap, PLL_FRAC(pll), &frac);
|
||||
if (regmap_read(pll->clkr.regmap, PLL_L_VAL(pll), &l))
|
||||
return 0;
|
||||
|
||||
if (regmap_read(pll->clkr.regmap, PLL_FRAC(pll), &frac))
|
||||
return 0;
|
||||
|
||||
return alpha_pll_calc_rate(parent_rate, l, frac, alpha_width);
|
||||
}
|
||||
|
@ -1465,7 +1480,8 @@ clk_trion_pll_postdiv_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
|
|||
struct regmap *regmap = pll->clkr.regmap;
|
||||
u32 i, div = 1, val;
|
||||
|
||||
regmap_read(regmap, PLL_USER_CTL(pll), &val);
|
||||
if (regmap_read(regmap, PLL_USER_CTL(pll), &val))
|
||||
return 0;
|
||||
|
||||
val >>= pll->post_div_shift;
|
||||
val &= PLL_POST_DIV_MASK(pll);
|
||||
|
@ -2339,9 +2355,12 @@ static unsigned long alpha_pll_lucid_evo_recalc_rate(struct clk_hw *hw,
|
|||
struct regmap *regmap = pll->clkr.regmap;
|
||||
u32 l, frac;
|
||||
|
||||
regmap_read(regmap, PLL_L_VAL(pll), &l);
|
||||
if (regmap_read(regmap, PLL_L_VAL(pll), &l))
|
||||
return 0;
|
||||
l &= LUCID_EVO_PLL_L_VAL_MASK;
|
||||
regmap_read(regmap, PLL_ALPHA_VAL(pll), &frac);
|
||||
|
||||
if (regmap_read(regmap, PLL_ALPHA_VAL(pll), &frac))
|
||||
return 0;
|
||||
|
||||
return alpha_pll_calc_rate(parent_rate, l, frac, pll_alpha_width(pll));
|
||||
}
|
||||
|
@ -2416,7 +2435,8 @@ static unsigned long clk_rivian_evo_pll_recalc_rate(struct clk_hw *hw,
|
|||
struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
|
||||
u32 l;
|
||||
|
||||
regmap_read(pll->clkr.regmap, PLL_L_VAL(pll), &l);
|
||||
if (regmap_read(pll->clkr.regmap, PLL_L_VAL(pll), &l))
|
||||
return 0;
|
||||
|
||||
return parent_rate * l;
|
||||
}
|
||||
|
|
|
@ -412,19 +412,23 @@ static const struct clk_parent_data mmc0_mmc1_parents[] = {
|
|||
{ .hw = &pll_periph0_2x_clk.common.hw },
|
||||
{ .hw = &pll_audio1_div2_clk.common.hw },
|
||||
};
|
||||
static SUNXI_CCU_MP_DATA_WITH_MUX_GATE(mmc0_clk, "mmc0", mmc0_mmc1_parents, 0x830,
|
||||
0, 4, /* M */
|
||||
8, 2, /* P */
|
||||
24, 3, /* mux */
|
||||
BIT(31), /* gate */
|
||||
0);
|
||||
static SUNXI_CCU_MP_DATA_WITH_MUX_GATE_POSTDIV(mmc0_clk, "mmc0",
|
||||
mmc0_mmc1_parents, 0x830,
|
||||
0, 4, /* M */
|
||||
8, 2, /* P */
|
||||
24, 3, /* mux */
|
||||
BIT(31), /* gate */
|
||||
2, /* post-div */
|
||||
0);
|
||||
|
||||
static SUNXI_CCU_MP_DATA_WITH_MUX_GATE(mmc1_clk, "mmc1", mmc0_mmc1_parents, 0x834,
|
||||
0, 4, /* M */
|
||||
8, 2, /* P */
|
||||
24, 3, /* mux */
|
||||
BIT(31), /* gate */
|
||||
0);
|
||||
static SUNXI_CCU_MP_DATA_WITH_MUX_GATE_POSTDIV(mmc1_clk, "mmc1",
|
||||
mmc0_mmc1_parents, 0x834,
|
||||
0, 4, /* M */
|
||||
8, 2, /* P */
|
||||
24, 3, /* mux */
|
||||
BIT(31), /* gate */
|
||||
2, /* post-div */
|
||||
0);
|
||||
|
||||
static const struct clk_parent_data mmc2_parents[] = {
|
||||
{ .fw_name = "hosc" },
|
||||
|
@ -433,12 +437,14 @@ static const struct clk_parent_data mmc2_parents[] = {
|
|||
{ .hw = &pll_periph0_800M_clk.common.hw },
|
||||
{ .hw = &pll_audio1_div2_clk.common.hw },
|
||||
};
|
||||
static SUNXI_CCU_MP_DATA_WITH_MUX_GATE(mmc2_clk, "mmc2", mmc2_parents, 0x838,
|
||||
0, 4, /* M */
|
||||
8, 2, /* P */
|
||||
24, 3, /* mux */
|
||||
BIT(31), /* gate */
|
||||
0);
|
||||
static SUNXI_CCU_MP_DATA_WITH_MUX_GATE_POSTDIV(mmc2_clk, "mmc2", mmc2_parents,
|
||||
0x838,
|
||||
0, 4, /* M */
|
||||
8, 2, /* P */
|
||||
24, 3, /* mux */
|
||||
BIT(31), /* gate */
|
||||
2, /* post-div */
|
||||
0);
|
||||
|
||||
static SUNXI_CCU_GATE_HWS(bus_mmc0_clk, "bus-mmc0", psi_ahb_hws,
|
||||
0x84c, BIT(0), 0);
|
||||
|
|
|
@ -52,6 +52,28 @@ struct ccu_mp {
|
|||
} \
|
||||
}
|
||||
|
||||
#define SUNXI_CCU_MP_DATA_WITH_MUX_GATE_POSTDIV(_struct, _name, _parents, \
|
||||
_reg, \
|
||||
_mshift, _mwidth, \
|
||||
_pshift, _pwidth, \
|
||||
_muxshift, _muxwidth, \
|
||||
_gate, _postdiv, _flags)\
|
||||
struct ccu_mp _struct = { \
|
||||
.enable = _gate, \
|
||||
.m = _SUNXI_CCU_DIV(_mshift, _mwidth), \
|
||||
.p = _SUNXI_CCU_DIV(_pshift, _pwidth), \
|
||||
.mux = _SUNXI_CCU_MUX(_muxshift, _muxwidth), \
|
||||
.fixed_post_div = _postdiv, \
|
||||
.common = { \
|
||||
.reg = _reg, \
|
||||
.features = CCU_FEATURE_FIXED_POSTDIV, \
|
||||
.hw.init = CLK_HW_INIT_PARENTS_DATA(_name, \
|
||||
_parents, \
|
||||
&ccu_mp_ops, \
|
||||
_flags), \
|
||||
} \
|
||||
}
|
||||
|
||||
#define SUNXI_CCU_MP_WITH_MUX_GATE(_struct, _name, _parents, _reg, \
|
||||
_mshift, _mwidth, \
|
||||
_pshift, _pwidth, \
|
||||
|
|
|
@@ -114,6 +114,9 @@ static void gic_update_frequency(void *data)

static int gic_starting_cpu(unsigned int cpu)
{
/* Ensure the GIC counter is running */
clear_gic_config(GIC_CONFIG_COUNTSTOP);

gic_clockevent_cpu_init(cpu, this_cpu_ptr(&gic_clockevent_device));
return 0;
}
@@ -248,9 +251,6 @@ static int __init gic_clocksource_of_init(struct device_node *node)
pr_warn("Unable to register clock notifier\n");
}

/* And finally start the counter */
clear_gic_config(GIC_CONFIG_COUNTSTOP);

/*
* It's safe to use the MIPS GIC timer as a sched clock source only if
* its ticks are stable, which is true on either the platforms with

@@ -165,6 +165,7 @@ static const struct of_device_id blocklist[] __initconst = {
{ .compatible = "qcom,sm8350", },
{ .compatible = "qcom,sm8450", },
{ .compatible = "qcom,sm8550", },
{ .compatible = "qcom,sm8650", },

{ .compatible = "st,stih407", },
{ .compatible = "st,stih410", },

@@ -73,11 +73,18 @@ static int tegra186_cpufreq_init(struct cpufreq_policy *policy)
{
struct tegra186_cpufreq_data *data = cpufreq_get_driver_data();
unsigned int cluster = data->cpus[policy->cpu].bpmp_cluster_id;
u32 cpu;

policy->freq_table = data->clusters[cluster].table;
policy->cpuinfo.transition_latency = 300 * 1000;
policy->driver_data = NULL;

/* set same policy for all cpus in a cluster */
for (cpu = 0; cpu < ARRAY_SIZE(tegra186_cpus); cpu++) {
if (data->cpus[cpu].bpmp_cluster_id == cluster)
cpumask_set_cpu(cpu, policy->cpus);
}

return 0;
}

@@ -246,8 +246,19 @@ again:
* This can deal with workloads that have long pauses interspersed
* with sporadic activity with a bunch of short pauses.
*/
if ((divisor * 4) <= INTERVALS * 3)
if (divisor * 4 <= INTERVALS * 3) {
/*
* If there are sufficiently many data points still under
* consideration after the outliers have been eliminated,
* returning without a prediction would be a mistake because it
* is likely that the next interval will not exceed the current
* maximum, so return the latter in that case.
*/
if (divisor >= INTERVALS / 2)
return max;

return UINT_MAX;
}

thresh = max - 1;
goto again;

@@ -410,9 +410,10 @@ static int cpt_process_ccode(struct otx2_cptlfs_info *lfs,
break;
}

dev_err(&pdev->dev,
"Request failed with software error code 0x%x\n",
cpt_status->s.uc_compcode);
pr_debug("Request failed with software error code 0x%x: algo = %s driver = %s\n",
cpt_status->s.uc_compcode,
info->req->areq->tfm->__crt_alg->cra_name,
info->req->areq->tfm->__crt_alg->cra_driver_name);
otx2_cpt_dump_sg_list(pdev, info->req);
break;
}

@@ -58,7 +58,7 @@ static irqreturn_t fsl_edma3_tx_handler(int irq, void *dev_id)

intr = edma_readl_chreg(fsl_chan, ch_int);
if (!intr)
return IRQ_HANDLED;
return IRQ_NONE;

edma_writel_chreg(fsl_chan, 1, ch_int);

@ -225,7 +225,7 @@ static int idxd_cdev_open(struct inode *inode, struct file *filp)
|
|||
struct idxd_wq *wq;
|
||||
struct device *dev, *fdev;
|
||||
int rc = 0;
|
||||
struct iommu_sva *sva;
|
||||
struct iommu_sva *sva = NULL;
|
||||
unsigned int pasid;
|
||||
struct idxd_cdev *idxd_cdev;
|
||||
|
||||
|
@ -322,7 +322,7 @@ failed_set_pasid:
|
|||
if (device_user_pasid_enabled(idxd))
|
||||
idxd_xa_pasid_remove(ctx);
|
||||
failed_get_pasid:
|
||||
if (device_user_pasid_enabled(idxd))
|
||||
if (device_user_pasid_enabled(idxd) && !IS_ERR_OR_NULL(sva))
|
||||
iommu_sva_unbind_device(sva);
|
||||
failed:
|
||||
mutex_unlock(&wq->wq_lock);
|
||||
|
@ -412,6 +412,9 @@ static int idxd_cdev_mmap(struct file *filp, struct vm_area_struct *vma)
|
|||
if (!idxd->user_submission_safe && !capable(CAP_SYS_RAWIO))
|
||||
return -EPERM;
|
||||
|
||||
if (current->mm != ctx->mm)
|
||||
return -EPERM;
|
||||
|
||||
rc = check_vma(wq, vma, __func__);
|
||||
if (rc < 0)
|
||||
return rc;
|
||||
|
@ -478,6 +481,9 @@ static ssize_t idxd_cdev_write(struct file *filp, const char __user *buf, size_t
|
|||
ssize_t written = 0;
|
||||
int i;
|
||||
|
||||
if (current->mm != ctx->mm)
|
||||
return -EPERM;
|
||||
|
||||
for (i = 0; i < len/sizeof(struct dsa_hw_desc); i++) {
|
||||
int rc = idxd_submit_user_descriptor(ctx, udesc + i);
|
||||
|
||||
|
@ -498,6 +504,9 @@ static __poll_t idxd_cdev_poll(struct file *filp,
|
|||
struct idxd_device *idxd = wq->idxd;
|
||||
__poll_t out = 0;
|
||||
|
||||
if (current->mm != ctx->mm)
|
||||
return POLLNVAL;
|
||||
|
||||
poll_wait(filp, &wq->err_queue, wait);
|
||||
spin_lock(&idxd->dev_lock);
|
||||
if (idxd->sw_err.valid)
|
||||
|
@ -584,6 +593,7 @@ void idxd_wq_del_cdev(struct idxd_wq *wq)
|
|||
|
||||
static int idxd_user_drv_probe(struct idxd_dev *idxd_dev)
|
||||
{
|
||||
struct device *dev = &idxd_dev->conf_dev;
|
||||
struct idxd_wq *wq = idxd_dev_to_wq(idxd_dev);
|
||||
struct idxd_device *idxd = wq->idxd;
|
||||
int rc;
|
||||
|
@ -611,6 +621,12 @@ static int idxd_user_drv_probe(struct idxd_dev *idxd_dev)
|
|||
|
||||
mutex_lock(&wq->wq_lock);
|
||||
|
||||
if (!idxd_wq_driver_name_match(wq, dev)) {
|
||||
idxd->cmd_status = IDXD_SCMD_WQ_NO_DRV_NAME;
|
||||
rc = -ENODEV;
|
||||
goto wq_err;
|
||||
}
|
||||
|
||||
wq->wq = create_workqueue(dev_name(wq_confdev(wq)));
|
||||
if (!wq->wq) {
|
||||
rc = -ENOMEM;
|
||||
|
|
|
@ -306,6 +306,12 @@ static int idxd_dmaengine_drv_probe(struct idxd_dev *idxd_dev)
|
|||
return -ENXIO;
|
||||
|
||||
mutex_lock(&wq->wq_lock);
|
||||
if (!idxd_wq_driver_name_match(wq, dev)) {
|
||||
idxd->cmd_status = IDXD_SCMD_WQ_NO_DRV_NAME;
|
||||
rc = -ENODEV;
|
||||
goto err;
|
||||
}
|
||||
|
||||
wq->type = IDXD_WQT_KERNEL;
|
||||
|
||||
rc = drv_enable_wq(wq);
|
||||
|
|
|
@@ -159,6 +159,8 @@ struct idxd_cdev {
int minor;
};

#define DRIVER_NAME_SIZE 128

#define IDXD_ALLOCATED_BATCH_SIZE 128U
#define WQ_NAME_SIZE 1024
#define WQ_TYPE_SIZE 10
@@ -227,6 +229,8 @@ struct idxd_wq {
/* Lock to protect upasid_xa access. */
struct mutex uc_lock;
struct xarray upasid_xa;

char driver_name[DRIVER_NAME_SIZE + 1];
};

struct idxd_engine {
@@ -648,6 +652,11 @@ static inline void idxd_wqcfg_set_max_batch_shift(int idxd_type, union wqcfg *wq
wqcfg->max_batch_shift = max_batch_shift;
}

static inline int idxd_wq_driver_name_match(struct idxd_wq *wq, struct device *dev)
{
return (strncmp(wq->driver_name, dev->driver->name, strlen(dev->driver->name)) == 0);
}

int __must_check __idxd_driver_register(struct idxd_device_driver *idxd_drv,
struct module *module, const char *mod_name);
#define idxd_driver_register(driver) \

@ -1282,6 +1282,39 @@ err:
|
|||
static struct device_attribute dev_attr_wq_op_config =
|
||||
__ATTR(op_config, 0644, wq_op_config_show, wq_op_config_store);
|
||||
|
||||
static ssize_t wq_driver_name_show(struct device *dev, struct device_attribute *attr, char *buf)
|
||||
{
|
||||
struct idxd_wq *wq = confdev_to_wq(dev);
|
||||
|
||||
return sysfs_emit(buf, "%s\n", wq->driver_name);
|
||||
}
|
||||
|
||||
static ssize_t wq_driver_name_store(struct device *dev, struct device_attribute *attr,
|
||||
const char *buf, size_t count)
|
||||
{
|
||||
struct idxd_wq *wq = confdev_to_wq(dev);
|
||||
char *input, *pos;
|
||||
|
||||
if (wq->state != IDXD_WQ_DISABLED)
|
||||
return -EPERM;
|
||||
|
||||
if (strlen(buf) > DRIVER_NAME_SIZE || strlen(buf) == 0)
|
||||
return -EINVAL;
|
||||
|
||||
input = kstrndup(buf, count, GFP_KERNEL);
|
||||
if (!input)
|
||||
return -ENOMEM;
|
||||
|
||||
pos = strim(input);
|
||||
memset(wq->driver_name, 0, DRIVER_NAME_SIZE + 1);
|
||||
sprintf(wq->driver_name, "%s", pos);
|
||||
kfree(input);
|
||||
return count;
|
||||
}
|
||||
|
||||
static struct device_attribute dev_attr_wq_driver_name =
|
||||
__ATTR(driver_name, 0644, wq_driver_name_show, wq_driver_name_store);
|
||||
|
||||
static struct attribute *idxd_wq_attributes[] = {
|
||||
&dev_attr_wq_clients.attr,
|
||||
&dev_attr_wq_state.attr,
|
||||
|
@ -1301,6 +1334,7 @@ static struct attribute *idxd_wq_attributes[] = {
|
|||
&dev_attr_wq_occupancy.attr,
|
||||
&dev_attr_wq_enqcmds_retries.attr,
|
||||
&dev_attr_wq_op_config.attr,
|
||||
&dev_attr_wq_driver_name.attr,
|
||||
NULL,
|
||||
};
|
||||
|
||||
|
|
|
@ -405,10 +405,9 @@ static int ie31200_probe1(struct pci_dev *pdev, int dev_idx)
|
|||
int i, j, ret;
|
||||
struct mem_ctl_info *mci = NULL;
|
||||
struct edac_mc_layer layers[2];
|
||||
struct dimm_data dimm_info[IE31200_CHANNELS][IE31200_DIMMS_PER_CHANNEL];
|
||||
void __iomem *window;
|
||||
struct ie31200_priv *priv;
|
||||
u32 addr_decode, mad_offset;
|
||||
u32 addr_decode[IE31200_CHANNELS], mad_offset;
|
||||
|
||||
/*
|
||||
* Kaby Lake, Coffee Lake seem to work like Skylake. Please re-visit
|
||||
|
@ -466,19 +465,10 @@ static int ie31200_probe1(struct pci_dev *pdev, int dev_idx)
|
|||
mad_offset = IE31200_MAD_DIMM_0_OFFSET;
|
||||
}
|
||||
|
||||
/* populate DIMM info */
|
||||
for (i = 0; i < IE31200_CHANNELS; i++) {
|
||||
addr_decode = readl(window + mad_offset +
|
||||
addr_decode[i] = readl(window + mad_offset +
|
||||
(i * 4));
|
||||
edac_dbg(0, "addr_decode: 0x%x\n", addr_decode);
|
||||
for (j = 0; j < IE31200_DIMMS_PER_CHANNEL; j++) {
|
||||
populate_dimm_info(&dimm_info[i][j], addr_decode, j,
|
||||
skl);
|
||||
edac_dbg(0, "size: 0x%x, rank: %d, width: %d\n",
|
||||
dimm_info[i][j].size,
|
||||
dimm_info[i][j].dual_rank,
|
||||
dimm_info[i][j].x16_width);
|
||||
}
|
||||
edac_dbg(0, "addr_decode: 0x%x\n", addr_decode[i]);
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -489,14 +479,22 @@ static int ie31200_probe1(struct pci_dev *pdev, int dev_idx)
|
|||
*/
|
||||
for (i = 0; i < IE31200_DIMMS_PER_CHANNEL; i++) {
|
||||
for (j = 0; j < IE31200_CHANNELS; j++) {
|
||||
struct dimm_data dimm_info;
|
||||
struct dimm_info *dimm;
|
||||
unsigned long nr_pages;
|
||||
|
||||
nr_pages = IE31200_PAGES(dimm_info[j][i].size, skl);
|
||||
populate_dimm_info(&dimm_info, addr_decode[j], i,
|
||||
skl);
|
||||
edac_dbg(0, "size: 0x%x, rank: %d, width: %d\n",
|
||||
dimm_info.size,
|
||||
dimm_info.dual_rank,
|
||||
dimm_info.x16_width);
|
||||
|
||||
nr_pages = IE31200_PAGES(dimm_info.size, skl);
|
||||
if (nr_pages == 0)
|
||||
continue;
|
||||
|
||||
if (dimm_info[j][i].dual_rank) {
|
||||
if (dimm_info.dual_rank) {
|
||||
nr_pages = nr_pages / 2;
|
||||
dimm = edac_get_dimm(mci, (i * 2) + 1, j, 0);
|
||||
dimm->nr_pages = nr_pages;
|
||||
|
|
|
@@ -191,6 +191,7 @@ struct ffa_device *ffa_device_register(const uuid_t *uuid, int vm_id,
dev = &ffa_dev->dev;
dev->bus = &ffa_bus_type;
dev->release = ffa_release_device;
dev->dma_mask = &dev->coherent_dma_mask;
dev_set_name(&ffa_dev->dev, "arm-ffa-%d", id);

ffa_dev->id = id;

@@ -121,6 +121,14 @@ static int ffa_version_check(u32 *version)
return -EOPNOTSUPP;
}

if (FFA_MAJOR_VERSION(ver.a0) > FFA_MAJOR_VERSION(FFA_DRIVER_VERSION)) {
pr_err("Incompatible v%d.%d! Latest supported v%d.%d\n",
FFA_MAJOR_VERSION(ver.a0), FFA_MINOR_VERSION(ver.a0),
FFA_MAJOR_VERSION(FFA_DRIVER_VERSION),
FFA_MINOR_VERSION(FFA_DRIVER_VERSION));
return -EINVAL;
}

if (ver.a0 < FFA_MIN_VERSION) {
pr_err("Incompatible v%d.%d! Earliest supported v%d.%d\n",
FFA_MAJOR_VERSION(ver.a0), FFA_MINOR_VERSION(ver.a0),

@@ -42,7 +42,7 @@ static atomic_t scmi_syspower_registered = ATOMIC_INIT(0);
* This helper let an SCMI driver request specific devices identified by the
* @id_table to be created for each active SCMI instance.
*
* The requested device name MUST NOT be already existent for any protocol;
* The requested device name MUST NOT be already existent for this protocol;
* at first the freshly requested @id_table is annotated in the IDR table
* @scmi_requested_devices and then the requested device is advertised to any
* registered party via the @scmi_requested_devices_nh notification chain.
@@ -52,7 +52,6 @@ static atomic_t scmi_syspower_registered = ATOMIC_INIT(0);
static int scmi_protocol_device_request(const struct scmi_device_id *id_table)
{
int ret = 0;
unsigned int id = 0;
struct list_head *head, *phead = NULL;
struct scmi_requested_dev *rdev;

@@ -67,19 +66,13 @@ static int scmi_protocol_device_request(const struct scmi_device_id *id_table)
}

/*
* Search for the matching protocol rdev list and then search
* of any existent equally named device...fails if any duplicate found.
* Find the matching protocol rdev list and then search of any
* existent equally named device...fails if any duplicate found.
*/
mutex_lock(&scmi_requested_devices_mtx);
idr_for_each_entry(&scmi_requested_devices, head, id) {
if (!phead) {
/* A list found registered in the IDR is never empty */
rdev = list_first_entry(head, struct scmi_requested_dev,
node);
if (rdev->id_table->protocol_id ==
id_table->protocol_id)
phead = head;
}
phead = idr_find(&scmi_requested_devices, id_table->protocol_id);
if (phead) {
head = phead;
list_for_each_entry(rdev, head, node) {
if (!strcmp(rdev->id_table->name, id_table->name)) {
pr_err("Ignoring duplicate request [%d] %s\n",

@@ -52,7 +52,7 @@
/* V2 Defines */
#define VSE_CVP_TX_CREDITS 0x49 /* 8bit */

#define V2_CREDIT_TIMEOUT_US 20000
#define V2_CREDIT_TIMEOUT_US 40000
#define V2_CHECK_CREDIT_US 10
#define V2_POLL_TIMEOUT_US 1000000
#define V2_USER_TIMEOUT_US 500000

@ -10,6 +10,7 @@
|
|||
|
||||
#include <linux/acpi.h>
|
||||
#include <linux/bitmap.h>
|
||||
#include <linux/cleanup.h>
|
||||
#include <linux/gpio/consumer.h>
|
||||
#include <linux/gpio/driver.h>
|
||||
#include <linux/i2c.h>
|
||||
|
@ -519,12 +520,10 @@ static int pca953x_gpio_direction_input(struct gpio_chip *gc, unsigned off)
|
|||
struct pca953x_chip *chip = gpiochip_get_data(gc);
|
||||
u8 dirreg = chip->recalc_addr(chip, chip->regs->direction, off);
|
||||
u8 bit = BIT(off % BANK_SZ);
|
||||
int ret;
|
||||
|
||||
mutex_lock(&chip->i2c_lock);
|
||||
ret = regmap_write_bits(chip->regmap, dirreg, bit, bit);
|
||||
mutex_unlock(&chip->i2c_lock);
|
||||
return ret;
|
||||
guard(mutex)(&chip->i2c_lock);
|
||||
|
||||
return regmap_write_bits(chip->regmap, dirreg, bit, bit);
|
||||
}
|
||||
|
||||
static int pca953x_gpio_direction_output(struct gpio_chip *gc,
|
||||
|
@ -536,17 +535,15 @@ static int pca953x_gpio_direction_output(struct gpio_chip *gc,
|
|||
u8 bit = BIT(off % BANK_SZ);
|
||||
int ret;
|
||||
|
||||
mutex_lock(&chip->i2c_lock);
|
||||
guard(mutex)(&chip->i2c_lock);
|
||||
|
||||
/* set output level */
|
||||
ret = regmap_write_bits(chip->regmap, outreg, bit, val ? bit : 0);
|
||||
if (ret)
|
||||
goto exit;
|
||||
return ret;
|
||||
|
||||
/* then direction */
|
||||
ret = regmap_write_bits(chip->regmap, dirreg, bit, 0);
|
||||
exit:
|
||||
mutex_unlock(&chip->i2c_lock);
|
||||
return ret;
|
||||
return regmap_write_bits(chip->regmap, dirreg, bit, 0);
|
||||
}
|
||||
|
||||
static int pca953x_gpio_get_value(struct gpio_chip *gc, unsigned off)
|
||||
|
@ -557,9 +554,8 @@ static int pca953x_gpio_get_value(struct gpio_chip *gc, unsigned off)
|
|||
u32 reg_val;
|
||||
int ret;
|
||||
|
||||
mutex_lock(&chip->i2c_lock);
|
||||
ret = regmap_read(chip->regmap, inreg, ®_val);
|
||||
mutex_unlock(&chip->i2c_lock);
|
||||
scoped_guard(mutex, &chip->i2c_lock)
|
||||
ret = regmap_read(chip->regmap, inreg, ®_val);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
|
@ -572,9 +568,9 @@ static void pca953x_gpio_set_value(struct gpio_chip *gc, unsigned off, int val)
|
|||
u8 outreg = chip->recalc_addr(chip, chip->regs->output, off);
|
||||
u8 bit = BIT(off % BANK_SZ);
|
||||
|
||||
mutex_lock(&chip->i2c_lock);
|
||||
guard(mutex)(&chip->i2c_lock);
|
||||
|
||||
regmap_write_bits(chip->regmap, outreg, bit, val ? bit : 0);
|
||||
mutex_unlock(&chip->i2c_lock);
|
||||
}
|
||||
|
||||
static int pca953x_gpio_get_direction(struct gpio_chip *gc, unsigned off)
|
||||
|
@ -585,9 +581,8 @@ static int pca953x_gpio_get_direction(struct gpio_chip *gc, unsigned off)
|
|||
u32 reg_val;
|
||||
int ret;
|
||||
|
||||
mutex_lock(&chip->i2c_lock);
|
||||
ret = regmap_read(chip->regmap, dirreg, ®_val);
|
||||
mutex_unlock(&chip->i2c_lock);
|
||||
scoped_guard(mutex, &chip->i2c_lock)
|
||||
ret = regmap_read(chip->regmap, dirreg, ®_val);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
|
@ -604,9 +599,8 @@ static int pca953x_gpio_get_multiple(struct gpio_chip *gc,
|
|||
DECLARE_BITMAP(reg_val, MAX_LINE);
|
||||
int ret;
|
||||
|
||||
mutex_lock(&chip->i2c_lock);
|
||||
ret = pca953x_read_regs(chip, chip->regs->input, reg_val);
|
||||
mutex_unlock(&chip->i2c_lock);
|
||||
scoped_guard(mutex, &chip->i2c_lock)
|
||||
ret = pca953x_read_regs(chip, chip->regs->input, reg_val);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
|
@ -621,16 +615,15 @@ static void pca953x_gpio_set_multiple(struct gpio_chip *gc,
|
|||
DECLARE_BITMAP(reg_val, MAX_LINE);
|
||||
int ret;
|
||||
|
||||
mutex_lock(&chip->i2c_lock);
|
||||
guard(mutex)(&chip->i2c_lock);
|
||||
|
||||
ret = pca953x_read_regs(chip, chip->regs->output, reg_val);
|
||||
if (ret)
|
||||
goto exit;
|
||||
return;
|
||||
|
||||
bitmap_replace(reg_val, reg_val, bits, mask, gc->ngpio);
|
||||
|
||||
pca953x_write_regs(chip, chip->regs->output, reg_val);
|
||||
exit:
|
||||
mutex_unlock(&chip->i2c_lock);
|
||||
}
|
||||
|
||||
static int pca953x_gpio_set_pull_up_down(struct pca953x_chip *chip,
|
||||
|
@ -638,7 +631,6 @@ static int pca953x_gpio_set_pull_up_down(struct pca953x_chip *chip,
|
|||
unsigned long config)
|
||||
{
|
||||
enum pin_config_param param = pinconf_to_config_param(config);
|
||||
|
||||
u8 pull_en_reg = chip->recalc_addr(chip, PCAL953X_PULL_EN, offset);
|
||||
u8 pull_sel_reg = chip->recalc_addr(chip, PCAL953X_PULL_SEL, offset);
|
||||
u8 bit = BIT(offset % BANK_SZ);
|
||||
|
@ -651,7 +643,7 @@ static int pca953x_gpio_set_pull_up_down(struct pca953x_chip *chip,
|
|||
if (!(chip->driver_data & PCA_PCAL))
|
||||
return -ENOTSUPP;
|
||||
|
||||
mutex_lock(&chip->i2c_lock);
|
||||
guard(mutex)(&chip->i2c_lock);
|
||||
|
||||
/* Configure pull-up/pull-down */
|
||||
if (param == PIN_CONFIG_BIAS_PULL_UP)
|
||||
|
@ -661,17 +653,13 @@ static int pca953x_gpio_set_pull_up_down(struct pca953x_chip *chip,
|
|||
else
|
||||
ret = 0;
|
||||
if (ret)
|
||||
goto exit;
|
||||
return ret;
|
||||
|
||||
/* Disable/Enable pull-up/pull-down */
|
||||
if (param == PIN_CONFIG_BIAS_DISABLE)
|
||||
ret = regmap_write_bits(chip->regmap, pull_en_reg, bit, 0);
|
||||
return regmap_write_bits(chip->regmap, pull_en_reg, bit, 0);
|
||||
else
|
||||
ret = regmap_write_bits(chip->regmap, pull_en_reg, bit, bit);
|
||||
|
||||
exit:
|
||||
mutex_unlock(&chip->i2c_lock);
|
||||
return ret;
|
||||
return regmap_write_bits(chip->regmap, pull_en_reg, bit, bit);
|
||||
}
|
||||
|
||||
static int pca953x_gpio_set_config(struct gpio_chip *gc, unsigned int offset,
|
||||
|
@ -883,10 +871,8 @@ static irqreturn_t pca953x_irq_handler(int irq, void *devid)
|
|||
|
||||
bitmap_zero(pending, MAX_LINE);
|
||||
|
||||
mutex_lock(&chip->i2c_lock);
|
||||
ret = pca953x_irq_pending(chip, pending);
|
||||
mutex_unlock(&chip->i2c_lock);
|
||||
|
||||
scoped_guard(mutex, &chip->i2c_lock)
|
||||
ret = pca953x_irq_pending(chip, pending);
|
||||
if (ret) {
|
||||
ret = 0;
|
||||
|
||||
|
@ -1168,9 +1154,9 @@ static int pca953x_probe(struct i2c_client *client)
|
|||
}
|
||||
|
||||
#ifdef CONFIG_PM_SLEEP
|
||||
static int pca953x_regcache_sync(struct device *dev)
|
||||
static int pca953x_regcache_sync(struct pca953x_chip *chip)
|
||||
{
|
||||
struct pca953x_chip *chip = dev_get_drvdata(dev);
|
||||
struct device *dev = &chip->client->dev;
|
||||
int ret;
|
||||
u8 regaddr;
|
||||
|
||||
|
@ -1217,13 +1203,38 @@ static int pca953x_regcache_sync(struct device *dev)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static int pca953x_restore_context(struct pca953x_chip *chip)
|
||||
{
|
||||
int ret;
|
||||
|
||||
guard(mutex)(&chip->i2c_lock);
|
||||
|
||||
if (chip->client->irq > 0)
|
||||
enable_irq(chip->client->irq);
|
||||
regcache_cache_only(chip->regmap, false);
|
||||
regcache_mark_dirty(chip->regmap);
|
||||
ret = pca953x_regcache_sync(chip);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
return regcache_sync(chip->regmap);
|
||||
}
|
||||
|
||||
static void pca953x_save_context(struct pca953x_chip *chip)
|
||||
{
|
||||
guard(mutex)(&chip->i2c_lock);
|
||||
|
||||
/* Disable IRQ to prevent early triggering while regmap "cache only" is on */
|
||||
if (chip->client->irq > 0)
|
||||
disable_irq(chip->client->irq);
|
||||
regcache_cache_only(chip->regmap, true);
|
||||
}
|
||||
|
||||
static int pca953x_suspend(struct device *dev)
|
||||
{
|
||||
struct pca953x_chip *chip = dev_get_drvdata(dev);
|
||||
|
||||
mutex_lock(&chip->i2c_lock);
|
||||
regcache_cache_only(chip->regmap, true);
|
||||
mutex_unlock(&chip->i2c_lock);
|
||||
pca953x_save_context(chip);
|
||||
|
||||
if (atomic_read(&chip->wakeup_path))
|
||||
device_set_wakeup_path(dev);
|
||||
|
@ -1246,17 +1257,7 @@ static int pca953x_resume(struct device *dev)
|
|||
}
|
||||
}
|
||||
|
||||
mutex_lock(&chip->i2c_lock);
|
||||
regcache_cache_only(chip->regmap, false);
|
||||
regcache_mark_dirty(chip->regmap);
|
||||
ret = pca953x_regcache_sync(dev);
|
||||
if (ret) {
|
||||
mutex_unlock(&chip->i2c_lock);
|
||||
return ret;
|
||||
}
|
||||
|
||||
ret = regcache_sync(chip->regmap);
|
||||
mutex_unlock(&chip->i2c_lock);
|
||||
ret = pca953x_restore_context(chip);
|
||||
if (ret) {
|
||||
dev_err(dev, "Failed to restore register map: %d\n", ret);
|
||||
return ret;
|
||||
|
|
|
@ -43,6 +43,29 @@
|
|||
#include <linux/pci-p2pdma.h>
|
||||
#include <linux/pm_runtime.h>
|
||||
|
||||
static const struct dma_buf_attach_ops amdgpu_dma_buf_attach_ops;
|
||||
|
||||
/**
|
||||
* dma_buf_attach_adev - Helper to get adev of an attachment
|
||||
*
|
||||
* @attach: attachment
|
||||
*
|
||||
* Returns:
|
||||
* A struct amdgpu_device * if the attaching device is an amdgpu device or
|
||||
* partition, NULL otherwise.
|
||||
*/
|
||||
static struct amdgpu_device *dma_buf_attach_adev(struct dma_buf_attachment *attach)
|
||||
{
|
||||
if (attach->importer_ops == &amdgpu_dma_buf_attach_ops) {
|
||||
struct drm_gem_object *obj = attach->importer_priv;
|
||||
struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
|
||||
|
||||
return amdgpu_ttm_adev(bo->tbo.bdev);
|
||||
}
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
/**
|
||||
* amdgpu_dma_buf_attach - &dma_buf_ops.attach implementation
|
||||
*
|
||||
|
@ -54,12 +77,14 @@
|
|||
static int amdgpu_dma_buf_attach(struct dma_buf *dmabuf,
|
||||
struct dma_buf_attachment *attach)
|
||||
{
|
||||
struct amdgpu_device *attach_adev = dma_buf_attach_adev(attach);
|
||||
struct drm_gem_object *obj = dmabuf->priv;
|
||||
struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
|
||||
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
|
||||
int r;
|
||||
|
||||
if (pci_p2pdma_distance(adev->pdev, attach->dev, false) < 0)
|
||||
if (!amdgpu_dmabuf_is_xgmi_accessible(attach_adev, bo) &&
|
||||
pci_p2pdma_distance(adev->pdev, attach->dev, false) < 0)
|
||||
attach->peer2peer = false;
|
||||
|
||||
r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
|
||||
|
@ -482,6 +507,9 @@ bool amdgpu_dmabuf_is_xgmi_accessible(struct amdgpu_device *adev,
|
|||
struct drm_gem_object *obj = &bo->tbo.base;
|
||||
struct drm_gem_object *gobj;
|
||||
|
||||
if (!adev)
|
||||
return false;
|
||||
|
||||
if (obj->import_attach) {
|
||||
struct dma_buf *dma_buf = obj->import_attach->dmabuf;
|
||||
|
||||
|
|
|
@@ -43,7 +43,7 @@
#include "amdgpu_securedisplay.h"
#include "amdgpu_atomfirmware.h"

#define AMD_VBIOS_FILE_MAX_SIZE_B (1024*1024*3)
#define AMD_VBIOS_FILE_MAX_SIZE_B (1024*1024*16)

static int psp_load_smu_fw(struct psp_context *psp);
static int psp_rap_terminate(struct psp_context *psp);
@@ -506,7 +506,6 @@ static int psp_sw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct psp_context *psp = &adev->psp;
struct psp_gfx_cmd_resp *cmd = psp->cmd;

psp_memory_training_fini(psp);

@@ -516,8 +515,8 @@ static int psp_sw_fini(void *handle)
amdgpu_ucode_release(&psp->cap_fw);
amdgpu_ucode_release(&psp->toc_fw);

kfree(cmd);
cmd = NULL;
kfree(psp->cmd);
psp->cmd = NULL;

psp_free_shared_bufs(psp);

@@ -92,12 +92,12 @@ static void gfxhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev)
{
uint64_t value;

/* Program the AGP BAR */
WREG32_SOC15_RLC(GC, 0, mmMC_VM_AGP_BASE, 0);
WREG32_SOC15_RLC(GC, 0, mmMC_VM_AGP_BOT, adev->gmc.agp_start >> 24);
WREG32_SOC15_RLC(GC, 0, mmMC_VM_AGP_TOP, adev->gmc.agp_end >> 24);

if (!amdgpu_sriov_vf(adev) || adev->asic_type <= CHIP_VEGA10) {
/* Program the AGP BAR */
WREG32_SOC15_RLC(GC, 0, mmMC_VM_AGP_BASE, 0);
WREG32_SOC15_RLC(GC, 0, mmMC_VM_AGP_BOT, adev->gmc.agp_start >> 24);
WREG32_SOC15_RLC(GC, 0, mmMC_VM_AGP_TOP, adev->gmc.agp_end >> 24);

/* Program the system aperture low logical page number. */
WREG32_SOC15_RLC(GC, 0, mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18);

@@ -172,6 +172,30 @@ static void mmhub_v1_7_init_tlb_regs(struct amdgpu_device *adev)
WREG32_SOC15(MMHUB, 0, regMC_VM_MX_L1_TLB_CNTL, tmp);
}

/* Set snoop bit for SDMA so that SDMA writes probe-invalidates RW lines */
static void mmhub_v1_7_init_snoop_override_regs(struct amdgpu_device *adev)
{
uint32_t tmp;
int i;
uint32_t distance = regDAGB1_WRCLI_GPU_SNOOP_OVERRIDE -
regDAGB0_WRCLI_GPU_SNOOP_OVERRIDE;

for (i = 0; i < 5; i++) { /* DAGB instances */
tmp = RREG32_SOC15_OFFSET(MMHUB, 0,
regDAGB0_WRCLI_GPU_SNOOP_OVERRIDE, i * distance);
tmp |= (1 << 15); /* SDMA client is BIT15 */
WREG32_SOC15_OFFSET(MMHUB, 0,
regDAGB0_WRCLI_GPU_SNOOP_OVERRIDE, i * distance, tmp);

tmp = RREG32_SOC15_OFFSET(MMHUB, 0,
regDAGB0_WRCLI_GPU_SNOOP_OVERRIDE_VALUE, i * distance);
tmp |= (1 << 15);
WREG32_SOC15_OFFSET(MMHUB, 0,
regDAGB0_WRCLI_GPU_SNOOP_OVERRIDE_VALUE, i * distance, tmp);
}

}

static void mmhub_v1_7_init_cache_regs(struct amdgpu_device *adev)
{
uint32_t tmp;
@@ -337,6 +361,7 @@ static int mmhub_v1_7_gart_enable(struct amdgpu_device *adev)
mmhub_v1_7_init_system_aperture_regs(adev);
mmhub_v1_7_init_tlb_regs(adev);
mmhub_v1_7_init_cache_regs(adev);
mmhub_v1_7_init_snoop_override_regs(adev);

mmhub_v1_7_enable_system_domain(adev);
mmhub_v1_7_disable_identity_aperture(adev);

@ -213,6 +213,32 @@ static void mmhub_v1_8_init_tlb_regs(struct amdgpu_device *adev)
|
|||
}
|
||||
}
|
||||
|
||||
/* Set snoop bit for SDMA so that SDMA writes probe-invalidates RW lines */
|
||||
static void mmhub_v1_8_init_snoop_override_regs(struct amdgpu_device *adev)
|
||||
{
|
||||
uint32_t tmp, inst_mask;
|
||||
int i, j;
|
||||
uint32_t distance = regDAGB1_WRCLI_GPU_SNOOP_OVERRIDE -
|
||||
regDAGB0_WRCLI_GPU_SNOOP_OVERRIDE;
|
||||
|
||||
inst_mask = adev->aid_mask;
|
||||
for_each_inst(i, inst_mask) {
|
||||
for (j = 0; j < 5; j++) { /* DAGB instances */
|
||||
tmp = RREG32_SOC15_OFFSET(MMHUB, i,
|
||||
regDAGB0_WRCLI_GPU_SNOOP_OVERRIDE, j * distance);
|
||||
tmp |= (1 << 15); /* SDMA client is BIT15 */
|
||||
WREG32_SOC15_OFFSET(MMHUB, i,
|
||||
regDAGB0_WRCLI_GPU_SNOOP_OVERRIDE, j * distance, tmp);
|
||||
|
||||
tmp = RREG32_SOC15_OFFSET(MMHUB, i,
|
||||
regDAGB0_WRCLI_GPU_SNOOP_OVERRIDE_VALUE, j * distance);
|
||||
tmp |= (1 << 15);
|
||||
WREG32_SOC15_OFFSET(MMHUB, i,
|
||||
regDAGB0_WRCLI_GPU_SNOOP_OVERRIDE_VALUE, j * distance, tmp);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static void mmhub_v1_8_init_cache_regs(struct amdgpu_device *adev)
|
||||
{
|
||||
uint32_t tmp, inst_mask;
|
||||
|
@ -418,6 +444,7 @@ static int mmhub_v1_8_gart_enable(struct amdgpu_device *adev)
|
|||
mmhub_v1_8_init_system_aperture_regs(adev);
|
||||
mmhub_v1_8_init_tlb_regs(adev);
|
||||
mmhub_v1_8_init_cache_regs(adev);
|
||||
mmhub_v1_8_init_snoop_override_regs(adev);
|
||||
|
||||
mmhub_v1_8_enable_system_domain(adev);
|
||||
mmhub_v1_8_disable_identity_aperture(adev);
|
||||
|
|
|
@ -198,6 +198,36 @@ static void mmhub_v9_4_init_tlb_regs(struct amdgpu_device *adev, int hubid)
|
|||
hubid * MMHUB_INSTANCE_REGISTER_OFFSET, tmp);
|
||||
}
|
||||
|
||||
/* Set snoop bit for SDMA so that SDMA writes probe-invalidates RW lines */
|
||||
static void mmhub_v9_4_init_snoop_override_regs(struct amdgpu_device *adev, int hubid)
|
||||
{
|
||||
uint32_t tmp;
|
||||
int i;
|
||||
uint32_t distance = mmDAGB1_WRCLI_GPU_SNOOP_OVERRIDE -
|
||||
mmDAGB0_WRCLI_GPU_SNOOP_OVERRIDE;
|
||||
uint32_t huboffset = hubid * MMHUB_INSTANCE_REGISTER_OFFSET;
|
||||
|
||||
for (i = 0; i < 5 - (2 * hubid); i++) {
|
||||
/* DAGB instances 0 to 4 are in hub0 and 5 to 7 are in hub1 */
|
||||
tmp = RREG32_SOC15_OFFSET(MMHUB, 0,
|
||||
mmDAGB0_WRCLI_GPU_SNOOP_OVERRIDE,
|
||||
huboffset + i * distance);
|
||||
tmp |= (1 << 15); /* SDMA client is BIT15 */
|
||||
WREG32_SOC15_OFFSET(MMHUB, 0,
|
||||
mmDAGB0_WRCLI_GPU_SNOOP_OVERRIDE,
|
||||
huboffset + i * distance, tmp);
|
||||
|
||||
tmp = RREG32_SOC15_OFFSET(MMHUB, 0,
|
||||
mmDAGB0_WRCLI_GPU_SNOOP_OVERRIDE_VALUE,
|
||||
huboffset + i * distance);
|
||||
tmp |= (1 << 15);
|
||||
WREG32_SOC15_OFFSET(MMHUB, 0,
|
||||
mmDAGB0_WRCLI_GPU_SNOOP_OVERRIDE_VALUE,
|
||||
huboffset + i * distance, tmp);
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
static void mmhub_v9_4_init_cache_regs(struct amdgpu_device *adev, int hubid)
|
||||
{
|
||||
uint32_t tmp;
|
||||
|
@ -392,6 +422,7 @@ static int mmhub_v9_4_gart_enable(struct amdgpu_device *adev)
|
|||
if (!amdgpu_sriov_vf(adev))
|
||||
mmhub_v9_4_init_cache_regs(adev, i);
|
||||
|
||||
mmhub_v9_4_init_snoop_override_regs(adev, i);
|
||||
mmhub_v9_4_enable_system_domain(adev, i);
|
||||
if (!amdgpu_sriov_vf(adev))
|
||||
mmhub_v9_4_disable_identity_aperture(adev, i);
|
||||
|
|
|
@@ -142,23 +142,23 @@ static struct amdgpu_video_codec_info sriov_sc_video_codecs_encode_array[] = {
};

static struct amdgpu_video_codec_info sriov_sc_video_codecs_decode_array_vcn0[] = {
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 1920, 1088, 3)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 1920, 1088, 5)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 1920, 1088, 4)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 16384, 16384, 0)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1, 8192, 4352, 0)},
};

static struct amdgpu_video_codec_info sriov_sc_video_codecs_decode_array_vcn1[] = {
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 1920, 1088, 3)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 1920, 1088, 5)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 1920, 1088, 4)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 16384, 16384, 0)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
};

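Tables like these feed the AMDGPU_INFO_VIDEO_CAPS query: for each codec index the driver reports a maximum width, height and level, which user space checks before creating a decode session. A hedged sketch of that table-lookup shape, as standalone C with an illustrative struct and values rather than the real amdgpu definitions:

#include <inttypes.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for struct amdgpu_video_codec_info (illustrative only). */
struct codec_info {
	uint32_t codec_type;	/* plays the role of an IDX_* value */
	uint32_t max_width;
	uint32_t max_height;
	uint32_t max_level;
};

enum { IDX_MPEG4_AVC, IDX_HEVC, IDX_JPEG };

static const struct codec_info decode_caps[] = {
	{ IDX_MPEG4_AVC, 4096, 4096, 52 },
	{ IDX_HEVC, 8192, 4352, 186 },
	{ IDX_JPEG, 16384, 16384, 0 },
};

/* Return the caps entry for a codec, or NULL if this engine lacks it. */
static const struct codec_info *query_codec(uint32_t type)
{
	size_t i;

	for (i = 0; i < sizeof(decode_caps) / sizeof(decode_caps[0]); i++)
		if (decode_caps[i].codec_type == type)
			return &decode_caps[i];
	return NULL;
}

int main(void)
{
	const struct codec_info *ci = query_codec(IDX_HEVC);

	if (ci)
		printf("HEVC decode: up to %" PRIu32 "x%" PRIu32 ", level %" PRIu32 "\n",
		       ci->max_width, ci->max_height, ci->max_level);
	return 0;
}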
@@ -117,23 +117,17 @@ static struct amdgpu_video_codecs sriov_vcn_4_0_0_video_codecs_encode_vcn1 = {
};

static struct amdgpu_video_codec_info sriov_vcn_4_0_0_video_codecs_decode_array_vcn0[] = {
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 16384, 16384, 0)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1, 8192, 4352, 0)},
};

static struct amdgpu_video_codec_info sriov_vcn_4_0_0_video_codecs_decode_array_vcn1[] = {
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 16384, 16384, 0)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
};

@@ -2147,14 +2147,6 @@ failed_try_destroy_debugged_queue:
	return retval;
}

/*
 * Low bits must be 0000/FFFF as required by HW, high bits must be 0 to
 * stay in user mode.
 */
#define APE1_FIXED_BITS_MASK 0xFFFF80000000FFFFULL
/* APE1 limit is inclusive and 64K aligned. */
#define APE1_LIMIT_ALIGNMENT 0xFFFF

static bool set_cache_memory_policy(struct device_queue_manager *dqm,
				    struct qcm_process_device *qpd,
				    enum cache_policy default_policy,

@@ -2169,34 +2161,6 @@ static bool set_cache_memory_policy(struct device_queue_manager *dqm,

	dqm_lock(dqm);

	if (alternate_aperture_size == 0) {
		/* base > limit disables APE1 */
		qpd->sh_mem_ape1_base = 1;
		qpd->sh_mem_ape1_limit = 0;
	} else {
		/*
		 * In FSA64, APE1_Base[63:0] = { 16{SH_MEM_APE1_BASE[31]},
		 *			SH_MEM_APE1_BASE[31:0], 0x0000 }
		 * APE1_Limit[63:0] = { 16{SH_MEM_APE1_LIMIT[31]},
		 *			SH_MEM_APE1_LIMIT[31:0], 0xFFFF }
		 * Verify that the base and size parameters can be
		 * represented in this format and convert them.
		 * Additionally restrict APE1 to user-mode addresses.
		 */

		uint64_t base = (uintptr_t)alternate_aperture_base;
		uint64_t limit = base + alternate_aperture_size - 1;

		if (limit <= base || (base & APE1_FIXED_BITS_MASK) != 0 ||
		   (limit & APE1_FIXED_BITS_MASK) != APE1_LIMIT_ALIGNMENT) {
			retval = false;
			goto out;
		}

		qpd->sh_mem_ape1_base = base >> 16;
		qpd->sh_mem_ape1_limit = limit >> 16;
	}

	retval = dqm->asic_ops.set_cache_memory_policy(
			dqm,
			qpd,

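The APE1 handling above encodes a 64-bit base/limit pair into two 32-bit registers by dropping the low 16 bits, after checking that the aperture is 64K-aligned, ends on a 64K boundary minus one, and stays in the user-mode half of the address space. A small worked example of that check and conversion; the mask and alignment values are copied from the defines shown in this diff, everything else is illustrative:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define APE1_FIXED_BITS_MASK	0xFFFF80000000FFFFULL
#define APE1_LIMIT_ALIGNMENT	0xFFFF

int main(void)
{
	/* Illustrative user-mode aperture: 64K-aligned base, 1 MiB size. */
	uint64_t base  = 0x0000200000000000ULL;
	uint64_t size  = 0x100000;
	uint64_t limit = base + size - 1;

	if (limit <= base || (base & APE1_FIXED_BITS_MASK) != 0 ||
	    (limit & APE1_FIXED_BITS_MASK) != APE1_LIMIT_ALIGNMENT) {
		puts("rejected: not representable in SH_MEM_APE1_BASE/LIMIT");
		return 1;
	}

	/* The registers hold only bits [47:16] of the addresses. */
	printf("SH_MEM_APE1_BASE  = 0x%08" PRIx64 "\n", base >> 16);
	printf("SH_MEM_APE1_LIMIT = 0x%08" PRIx64 "\n", limit >> 16);
	return 0;
}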
@@ -2205,6 +2169,9 @@ static bool set_cache_memory_policy(struct device_queue_manager *dqm,
			alternate_aperture_base,
			alternate_aperture_size);

	if (retval)
		goto out;

	if ((dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) && (qpd->vmid != 0))
		program_sh_mem_settings(dqm, qpd);

@@ -27,6 +27,14 @@
#include "oss/oss_2_4_sh_mask.h"
#include "gca/gfx_7_2_sh_mask.h"

/*
 * Low bits must be 0000/FFFF as required by HW, high bits must be 0 to
 * stay in user mode.
 */
#define APE1_FIXED_BITS_MASK 0xFFFF80000000FFFFULL
/* APE1 limit is inclusive and 64K aligned. */
#define APE1_LIMIT_ALIGNMENT 0xFFFF

static bool set_cache_memory_policy_cik(struct device_queue_manager *dqm,
					struct qcm_process_device *qpd,
					enum cache_policy default_policy,

@@ -84,6 +92,36 @@ static bool set_cache_memory_policy_cik(struct device_queue_manager *dqm,
{
	uint32_t default_mtype;
	uint32_t ape1_mtype;
	unsigned int temp;
	bool retval = true;

	if (alternate_aperture_size == 0) {
		/* base > limit disables APE1 */
		qpd->sh_mem_ape1_base = 1;
		qpd->sh_mem_ape1_limit = 0;
	} else {
		/*
		 * In FSA64, APE1_Base[63:0] = { 16{SH_MEM_APE1_BASE[31]},
		 *			SH_MEM_APE1_BASE[31:0], 0x0000 }
		 * APE1_Limit[63:0] = { 16{SH_MEM_APE1_LIMIT[31]},
		 *			SH_MEM_APE1_LIMIT[31:0], 0xFFFF }
		 * Verify that the base and size parameters can be
		 * represented in this format and convert them.
		 * Additionally restrict APE1 to user-mode addresses.
		 */

		uint64_t base = (uintptr_t)alternate_aperture_base;
		uint64_t limit = base + alternate_aperture_size - 1;

		if (limit <= base || (base & APE1_FIXED_BITS_MASK) != 0 ||
		   (limit & APE1_FIXED_BITS_MASK) != APE1_LIMIT_ALIGNMENT) {
			retval = false;
			goto out;
		}

		qpd->sh_mem_ape1_base = base >> 16;
		qpd->sh_mem_ape1_limit = limit >> 16;
	}

	default_mtype = (default_policy == cache_policy_coherent) ?
			MTYPE_NONCACHED :

@@ -97,37 +135,22 @@ static bool set_cache_memory_policy_cik(struct device_queue_manager *dqm,
			| ALIGNMENT_MODE(SH_MEM_ALIGNMENT_MODE_UNALIGNED)
			| DEFAULT_MTYPE(default_mtype)
			| APE1_MTYPE(ape1_mtype);

	return true;
}

static int update_qpd_cik(struct device_queue_manager *dqm,
			  struct qcm_process_device *qpd)
{
	struct kfd_process_device *pdd;
	unsigned int temp;

	pdd = qpd_to_pdd(qpd);

	/* check if sh_mem_config register already configured */
	if (qpd->sh_mem_config == 0) {
		qpd->sh_mem_config =
			ALIGNMENT_MODE(SH_MEM_ALIGNMENT_MODE_UNALIGNED) |
			DEFAULT_MTYPE(MTYPE_NONCACHED) |
			APE1_MTYPE(MTYPE_NONCACHED);
		qpd->sh_mem_ape1_limit = 0;
		qpd->sh_mem_ape1_base = 0;
	}

	/* On dGPU we're always in GPUVM64 addressing mode with 64-bit
	 * aperture addresses.
	 */
	temp = get_sh_mem_bases_nybble_64(pdd);
	temp = get_sh_mem_bases_nybble_64(qpd_to_pdd(qpd));
	qpd->sh_mem_bases = compute_sh_mem_bases_64bit(temp);

	pr_debug("is32bit process: %d sh_mem_bases nybble: 0x%X and register 0x%X\n",
		qpd->pqm->process->is_32bit_user_mode, temp, qpd->sh_mem_bases);

out:
	return retval;
}

static int update_qpd_cik(struct device_queue_manager *dqm,
			  struct qcm_process_device *qpd)
{
	return 0;
}
Some files were not shown because too many files have changed in this diff.