Mirror of https://github.com/nxp-imx/linux-imx.git (synced 2025-07-13 04:39:36 +02:00)
drm/amdgpu/pm: Fix the null pointer dereference for smu7
[ Upstream commit c02c1960c9 ]
Optimize the code to avoid passing a null pointer (hwmgr->backend) to the
function smu7_update_edc_leakage_table().
Signed-off-by: Ma Jun <Jun.Ma2@amd.com>
Reviewed-by: Yang Wang <kevinyang.wang@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
commit 09544cd95c
parent 837ab17cee
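For context, the bug fixed here is an ordering problem in the error path: if the rule-settings init failed, the old code called smu7_hwmgr_backend_fini() (which frees hwmgr->backend and sets it to NULL) but then still fell through to smu7_update_edc_leakage_table(), which dereferences hwmgr->backend. Below is a minimal, self-contained C sketch of the goto-fail cleanup pattern the patch switches to; fake_hwmgr, backend_init(), backend_fini() and update_edc_table() are hypothetical stand-ins for the kernel structures and functions, not the real code.

/*
 * Minimal sketch (not the kernel code) of the goto-fail cleanup pattern.
 * fake_hwmgr, backend_init(), backend_fini() and update_edc_table() are
 * hypothetical stand-ins for pp_hwmgr, smu7_hwmgr_backend_init(),
 * smu7_hwmgr_backend_fini() and smu7_update_edc_leakage_table().
 */
#include <stdio.h>
#include <stdlib.h>

struct fake_hwmgr {
	int *backend;		/* freed and set to NULL by backend_fini() */
};

static void backend_fini(struct fake_hwmgr *hwmgr)
{
	free(hwmgr->backend);
	hwmgr->backend = NULL;	/* any later dereference would crash */
}

static int update_edc_table(struct fake_hwmgr *hwmgr)
{
	/* Like the real table update, this assumes backend is non-NULL. */
	return *hwmgr->backend;
}

/*
 * Fixed flow: every failure jumps to one cleanup label at the end, so no
 * call that dereferences hwmgr->backend can run after the backend has
 * already been torn down.
 */
static int backend_init(struct fake_hwmgr *hwmgr, int rule_init_result)
{
	int result;

	hwmgr->backend = calloc(1, sizeof(*hwmgr->backend));
	if (!hwmgr->backend)
		return -1;

	result = rule_init_result;	/* stands in for the rule-settings init */
	if (result)
		goto fail;

	result = update_edc_table(hwmgr);
	if (result)
		goto fail;

	return 0;
fail:
	backend_fini(hwmgr);
	return result;
}

int main(void)
{
	struct fake_hwmgr hwmgr = { 0 };

	/* Failing rule-settings step: cleaned up via "fail", no NULL deref. */
	printf("init (rules step fails): %d\n", backend_init(&hwmgr, -22));
	/* Happy path. */
	printf("init (success): %d\n", backend_init(&hwmgr, 0));
	backend_fini(&hwmgr);
	return 0;
}

The patched hunks follow.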
@@ -2957,6 +2957,7 @@ static int smu7_update_edc_leakage_table(struct pp_hwmgr *hwmgr)
 
 static int smu7_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
 {
+	struct amdgpu_device *adev = hwmgr->adev;
 	struct smu7_hwmgr *data;
 	int result = 0;
 
@@ -2993,40 +2994,37 @@ static int smu7_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
 	/* Initalize Dynamic State Adjustment Rule Settings */
 	result = phm_initializa_dynamic_state_adjustment_rule_settings(hwmgr);
 
-	if (0 == result) {
-		struct amdgpu_device *adev = hwmgr->adev;
+	if (result)
+		goto fail;
 
-		data->is_tlu_enabled = false;
+	data->is_tlu_enabled = false;
 
-		hwmgr->platform_descriptor.hardwareActivityPerformanceLevels =
+	hwmgr->platform_descriptor.hardwareActivityPerformanceLevels =
 							SMU7_MAX_HARDWARE_POWERLEVELS;
-		hwmgr->platform_descriptor.hardwarePerformanceLevels = 2;
-		hwmgr->platform_descriptor.minimumClocksReductionPercentage = 50;
+	hwmgr->platform_descriptor.hardwarePerformanceLevels = 2;
+	hwmgr->platform_descriptor.minimumClocksReductionPercentage = 50;
 
-		data->pcie_gen_cap = adev->pm.pcie_gen_mask;
-		if (data->pcie_gen_cap & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
-			data->pcie_spc_cap = 20;
-		else
-			data->pcie_spc_cap = 16;
-		data->pcie_lane_cap = adev->pm.pcie_mlw_mask;
+	data->pcie_gen_cap = adev->pm.pcie_gen_mask;
+	if (data->pcie_gen_cap & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
+		data->pcie_spc_cap = 20;
+	else
+		data->pcie_spc_cap = 16;
+	data->pcie_lane_cap = adev->pm.pcie_mlw_mask;
 
-		hwmgr->platform_descriptor.vbiosInterruptId = 0x20000400; /* IRQ_SOURCE1_SW_INT */
-		/* The true clock step depends on the frequency, typically 4.5 or 9 MHz. Here we use 5. */
-		hwmgr->platform_descriptor.clockStep.engineClock = 500;
-		hwmgr->platform_descriptor.clockStep.memoryClock = 500;
-		smu7_thermal_parameter_init(hwmgr);
-	} else {
-		/* Ignore return value in here, we are cleaning up a mess. */
-		smu7_hwmgr_backend_fini(hwmgr);
-	}
+	hwmgr->platform_descriptor.vbiosInterruptId = 0x20000400; /* IRQ_SOURCE1_SW_INT */
+	/* The true clock step depends on the frequency, typically 4.5 or 9 MHz. Here we use 5. */
+	hwmgr->platform_descriptor.clockStep.engineClock = 500;
+	hwmgr->platform_descriptor.clockStep.memoryClock = 500;
+	smu7_thermal_parameter_init(hwmgr);
 
 	result = smu7_update_edc_leakage_table(hwmgr);
-	if (result) {
-		smu7_hwmgr_backend_fini(hwmgr);
-		return result;
-	}
+	if (result)
+		goto fail;
 
 	return 0;
+fail:
+	smu7_hwmgr_backend_fini(hwmgr);
+	return result;
 }
 
 static int smu7_force_dpm_highest(struct pp_hwmgr *hwmgr)