drm fixes for 6.16-rc1

(Enabling amdkfd on RISC-V is more of a feature than a fix.)
 
 panel:
 - nt37801: fix IS_ERR
 - nt37801: fix Kconfig
 
 connector:
 - Fix null deref in HDMI audio helper.
 
 bridge:
 - analogix_dp: fixup clk-disable removal
 
 msm:
 - mailmap updates
 
 i915:
 - Fix the enabling/disabling of DP audio SDP splitting
 - Fix PSR register definitions for ALPM
 - Fix u32 overflow in SNPS PHY HDMI PLL setup
 - Fix GuC pending message underflow when submit fails
 - Fix GuC wakeref underflow race during reset
 
 xe:
 - Two documentation fixes
 - A couple of vm init fixes
 - Hwmon fixes
 - Drop redundant conversion to bool
 - Fix CONFIG_INTEL_VSEC dependency
 - Rework eviction rejection of bound external bos
 - Stop re-submitting signalled jobs
 - A couple of pxp fixes
 - Add back a fix that got lost in a merge
 - Create LRC bo without VM
 - Fix for the above fix
 
 amdgpu:
 - UserQ fixes
 - SMU 13.x fixes
 - VCN fixes
 - JPEG fixes
 - Misc cleanups
 - runtime pm fix
 - DCN 4.0.1 fixes
 - Misc display fixes
 - ISP fix
 - VRAM manager fix
 - RAS fixes
 - IP discovery fix
 - Cleaner shader fix for GC 10.1.x
 - OD fix
 - Non-OLED panel fix
 - Misc display fixes
 - Brightness fixes
 
 amdkfd:
 - Enable CONFIG_HSA_AMD on RISCV
 - SVM fix
 - Misc cleanups
 - Ref leak fix
 - WPTR BO fix
 
 radeon:
 - Misc cleanups
 -----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCgAdFiEEEKbZHaGwW9KfbeusDHTzWXnEhr4FAmhCg/4ACgkQDHTzWXnE
 hr7Q0g//an3wQGf8KgZxCs8DxVVA3zSrUDLiAbs5hsZJDNtd9uGqzy9pZzIV+cVK
 rguAcM/AEVvY/ET1PCVh1FlJ8jMadlGGX6MuegUzdzQ/wB7puwZ+KRZAMmSVEiY6
 7PVKceeJ2bnCK+Vn/SdXpD1s4AXn3hMCyuTfvOC4fJuee/qW62H/wl4ivXzilvvf
 DBSSlpjEcTSKJVRveOw1AL678Z34JhoUB3oek0kpx9TyF4rdKs5qDStEUxMIhpD8
 22vN5oF0UOU93N53udCt4gGQ/Xfqyyl03XP2JYnNmCMJB+BGSR/u/u59cjnvkwDs
 TQBBS8gXfAdRCEPrvtDGNZOLxEhPl+ZaKoTqRp6qi4uL7nUc8NTVBE3UTkt6LVcx
 W1HY5+QzuLPH73QUSSHL609qz1X1aRLWgFh+/Fo82LYh3ORtO6BwbQLP6ZGkbNzm
 GTRqLAmzprL2XisrxP0gsdvgpRplXjwxx7RzCE6evr/u+lMRr4dxoSx1k2C0vVhS
 sFoFjHdrWvHO8KtM14vTt/F7J79suqgQBqF37s8s1e5ptDra4aDQEzCAXxJYx6Pg
 2Q7tamvwaJndQUojd858+OU8lHVWDKm6eYuA4WrbbomT31CVkAWWrmcIiS3CBBX1
 6U0J4h8JcGilbCuPHCP2c9ibakkF/jkO+tZAgW88C/enF9r59r8=
 =jZKo
 -----END PGP SIGNATURE-----

Merge tag 'drm-next-2025-06-06' of https://gitlab.freedesktop.org/drm/kernel

Pull drm fixes from Dave Airlie:
 "This is pretty much two weeks worth of fixes, plus one thing that
  might be considered next: amdkfd is now able to be enabled on risc-v
  platforms.

  Otherwise, amdgpu and xe carry the majority of the fixes, with a
  smattering all over.

  panel:
   - nt37801: fix IS_ERR
   - nt37801: fix Kconfig

  connector:
   - Fix null deref in HDMI audio helper.

  bridge:
   - analogix_dp: fixup clk-disable removal

  nouveau:
   - minor typo fix (',' vs ';')

  msm:
   - mailmap updates

  i915:
   - Fix the enabling/disabling of DP audio SDP splitting
   - Fix PSR register definitions for ALPM
   - Fix u32 overflow in SNPS PHY HDMI PLL setup
   - Fix GuC pending message underflow when submit fails
   - Fix GuC wakeref underflow race during reset

  xe:
   - Two documentation fixes
   - A couple of vm init fixes
   - Hwmon fixes
   - Drop redundant conversion to bool
   - Fix CONFIG_INTEL_VSEC dependency
   - Rework eviction rejection of bound external bos
   - Stop re-submitting signalled jobs
   - A couple of pxp fixes
   - Add back a fix that got lost in a merge
   - Create LRC bo without VM
   - Fix for the above fix

  amdgpu:
   - UserQ fixes
   - SMU 13.x fixes
   - VCN fixes
   - JPEG fixes
   - Misc cleanups
   - runtime pm fix
   - DCN 4.0.1 fixes
   - Misc display fixes
   - ISP fix
   - VRAM manager fix
   - RAS fixes
   - IP discovery fix
   - Cleaner shader fix for GC 10.1.x
   - OD fix
   - Non-OLED panel fix
   - Misc display fixes
   - Brightness fixes

  amdkfd:
   - Enable CONFIG_HSA_AMD on RISCV
   - SVM fix
   - Misc cleanups
   - Ref leak fix
   - WPTR BO fix

  radeon:
   - Misc cleanups"

* tag 'drm-next-2025-06-06' of https://gitlab.freedesktop.org/drm/kernel: (105 commits)
  drm/nouveau/vfn/r535: Convert comma to semicolon
  drm/xe: remove unmatched xe_vm_unlock() from __xe_exec_queue_init()
  drm/xe: Create LRC BO without VM
  drm/xe/guc_submit: add back fix
  drm/xe/pxp: Clarify PXP queue creation behavior if PXP is not ready
  drm/xe/pxp: Use the correct define in the set_property_funcs array
  drm/xe/sched: stop re-submitting signalled jobs
  drm/xe: Rework eviction rejection of bound external bos
  drm/xe/vsec: fix CONFIG_INTEL_VSEC dependency
  drm/xe: drop redundant conversion to bool
  drm/xe/hwmon: Move card reactive critical power under channel card
  drm/xe/hwmon: Add support to manage power limits through mailbox
  drm/xe/vm: move xe_svm_init() earlier
  drm/xe/vm: move rebind_work init earlier
  MAINTAINERS: .mailmap: update Rob Clark's email address
  mailmap: Update entry for Akhil P Oommen
  MAINTAINERS: update my email address
  MAINTAINERS: drop myself as maintainer
  drm/i915/display: Fix u32 overflow in SNPS PHY HDMI PLL setup
  drm/amd/display: Fix default DC and AC levels
  ...
Linus Torvalds 2025-06-06 08:09:56 -07:00
commit e332935a54
140 changed files with 2317 additions and 636 deletions


@ -21,7 +21,8 @@ Adam Radford <aradford@gmail.com>
Adriana Reus <adi.reus@gmail.com> <adriana.reus@intel.com>
Adrian Bunk <bunk@stusta.de>
Ajay Kaher <ajay.kaher@broadcom.com> <akaher@vmware.com>
Akhil P Oommen <quic_akhilpo@quicinc.com> <akhilpo@codeaurora.org>
Akhil P Oommen <akhilpo@oss.qualcomm.com> <akhilpo@codeaurora.org>
Akhil P Oommen <akhilpo@oss.qualcomm.com> <quic_akhilpo@quicinc.com>
Alan Cox <alan@lxorguk.ukuu.org.uk>
Alan Cox <root@hraefn.swansea.linux.org.uk>
Aleksandar Markovic <aleksandar.markovic@mips.com> <aleksandar.markovic@imgtec.com>
@ -640,6 +641,8 @@ Richard Genoud <richard.genoud@bootlin.com> <richard.genoud@gmail.com>
Richard Leitner <richard.leitner@linux.dev> <dev@g0hl1n.net>
Richard Leitner <richard.leitner@linux.dev> <me@g0hl1n.net>
Richard Leitner <richard.leitner@linux.dev> <richard.leitner@skidata.com>
Rob Clark <robin.clark@oss.qualcomm.com> <robdclark@chromium.org>
Rob Clark <robin.clark@oss.qualcomm.com> <robdclark@gmail.com>
Robert Foss <rfoss@kernel.org> <robert.foss@linaro.org>
Rocky Liao <quic_rjliao@quicinc.com> <rjliao@codeaurora.org>
Rodrigo Siqueira <siqueira@igalia.com> <rodrigosiqueiramelo@gmail.com>


@ -60,26 +60,26 @@ Description: RO. Package default power limit (default TDP setting).
Only supported for particular Intel Xe graphics platforms.
What: /sys/bus/pci/drivers/xe/.../hwmon/hwmon<i>/power2_crit
Date: February 2024
KernelVersion: 6.8
What: /sys/bus/pci/drivers/xe/.../hwmon/hwmon<i>/power1_crit
Date: May 2025
KernelVersion: 6.15
Contact: intel-xe@lists.freedesktop.org
Description: RW. Package reactive critical (I1) power limit in microwatts.
Description: RW. Card reactive critical (I1) power limit in microwatts.
Package reactive critical (I1) power limit in microwatts is exposed
Card reactive critical (I1) power limit in microwatts is exposed
for client products. The power controller will throttle the
operating frequency if the power averaged over a window exceeds
this limit.
Only supported for particular Intel Xe graphics platforms.
What: /sys/bus/pci/drivers/xe/.../hwmon/hwmon<i>/curr2_crit
Date: February 2024
KernelVersion: 6.8
What: /sys/bus/pci/drivers/xe/.../hwmon/hwmon<i>/curr1_crit
Date: May 2025
KernelVersion: 6.15
Contact: intel-xe@lists.freedesktop.org
Description: RW. Package reactive critical (I1) power limit in milliamperes.
Description: RW. Card reactive critical (I1) power limit in milliamperes.
Package reactive critical (I1) power limit in milliamperes is
Card reactive critical (I1) power limit in milliamperes is
exposed for server products. The power controller will throttle
the operating frequency if the power averaged over a window
exceeds this limit.
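
As a side note, a minimal userspace sketch of how the renamed power1_crit attribute described above might be read; the PCI address and hwmon index below are placeholders, not taken from this series:

	#include <stdio.h>

	int main(void)
	{
		/* Hypothetical path; the PCI address and hwmon index vary per system. */
		const char *attr =
			"/sys/bus/pci/drivers/xe/0000:03:00.0/hwmon/hwmon2/power1_crit";
		FILE *f = fopen(attr, "r");
		long long limit_uw;

		if (!f)
			return 1;
		if (fscanf(f, "%lld", &limit_uw) == 1)
			printf("card reactive critical (I1) power limit: %lld uW\n",
			       limit_uw);
		fclose(f);
		return 0;
	}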


@ -16,6 +16,7 @@ DG2, etc is provided to prototype the driver.
xe_migrate
xe_cs
xe_pm
xe_gt_freq
xe_pcode
xe_gt_mcr
xe_wa


@ -0,0 +1,14 @@
.. SPDX-License-Identifier: (GPL-2.0+ OR MIT)
==========================
Xe GT Frequency Management
==========================
.. kernel-doc:: drivers/gpu/drm/xe/xe_gt_freq.c
:doc: Xe GT Frequency Management
Internal API
============
.. kernel-doc:: drivers/gpu/drm/xe/xe_gt_freq.c
:internal:


@ -7599,7 +7599,7 @@ F: Documentation/devicetree/bindings/display/panel/panel-mipi-dbi-spi.yaml
F: drivers/gpu/drm/tiny/panel-mipi-dbi.c
DRM DRIVER for Qualcomm Adreno GPUs
M: Rob Clark <robdclark@gmail.com>
M: Rob Clark <robin.clark@oss.qualcomm.com>
R: Sean Paul <sean@poorly.run>
R: Konrad Dybcio <konradybcio@kernel.org>
L: linux-arm-msm@vger.kernel.org
@ -7618,9 +7618,10 @@ F: drivers/gpu/drm/msm/registers/adreno/
F: include/uapi/drm/msm_drm.h
DRM DRIVER for Qualcomm display hardware
M: Rob Clark <robdclark@gmail.com>
M: Abhinav Kumar <quic_abhinavk@quicinc.com>
M: Rob Clark <robin.clark@oss.qualcomm.com>
M: Dmitry Baryshkov <lumag@kernel.org>
R: Abhinav Kumar <abhinav.kumar@linux.dev>
R: Jessica Zhang <jessica.zhang@oss.qualcomm.com>
R: Sean Paul <sean@poorly.run>
R: Marijn Suijten <marijn.suijten@somainline.org>
L: linux-arm-msm@vger.kernel.org
@ -20396,7 +20397,7 @@ F: drivers/soc/qcom/icc-bwmon.c
F: drivers/soc/qcom/trace_icc-bwmon.h
QUALCOMM IOMMU
M: Rob Clark <robdclark@gmail.com>
M: Rob Clark <robin.clark@oss.qualcomm.com>
L: iommu@lists.linux.dev
L: linux-arm-msm@vger.kernel.org
S: Maintained
@ -20439,7 +20440,7 @@ F: drivers/regulator/vqmmc-ipq4019-regulator.c
QUALCOMM IRIS VIDEO ACCELERATOR DRIVER
M: Vikash Garodia <quic_vgarodia@quicinc.com>
M: Dikshita Agarwal <quic_dikshita@quicinc.com>
R: Abhinav Kumar <quic_abhinavk@quicinc.com>
R: Abhinav Kumar <abhinav.kumar@linux.dev>
R: Bryan O'Donoghue <bryan.odonoghue@linaro.org>
L: linux-media@vger.kernel.org
L: linux-arm-msm@vger.kernel.org


@ -76,7 +76,7 @@ config DRM_AMDGPU_USERPTR
config DRM_AMD_ISP
bool "Enable AMD Image Signal Processor IP support"
depends on DRM_AMDGPU
depends on DRM_AMDGPU && ACPI
select MFD_CORE
select PM_GENERIC_DOMAINS if PM
help


@ -1713,6 +1713,10 @@ static inline bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev) { retu
static inline bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev) { return false; }
#endif
#if defined(CONFIG_DRM_AMD_ISP)
int amdgpu_acpi_get_isp4_dev_hid(u8 (*hid)[ACPI_ID_LEN]);
#endif
void amdgpu_register_gpu_instance(struct amdgpu_device *adev);
void amdgpu_unregister_gpu_instance(struct amdgpu_device *adev);


@ -1532,5 +1532,35 @@ bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev)
return true;
#endif /* CONFIG_AMD_PMC */
}
#endif /* CONFIG_SUSPEND */
#if IS_ENABLED(CONFIG_DRM_AMD_ISP)
static const struct acpi_device_id isp_sensor_ids[] = {
{ "OMNI5C10" },
{ }
};
static int isp_match_acpi_device_ids(struct device *dev, const void *data)
{
return acpi_match_device(data, dev) ? 1 : 0;
}
int amdgpu_acpi_get_isp4_dev_hid(u8 (*hid)[ACPI_ID_LEN])
{
struct device *pdev __free(put_device) = NULL;
struct acpi_device *acpi_pdev;
pdev = bus_find_device(&platform_bus_type, NULL, isp_sensor_ids,
isp_match_acpi_device_ids);
if (!pdev)
return -EINVAL;
acpi_pdev = ACPI_COMPANION(pdev);
if (!acpi_pdev)
return -ENODEV;
strscpy(*hid, acpi_device_hid(acpi_pdev));
return 0;
}
#endif /* CONFIG_DRM_AMD_ISP */


@ -368,6 +368,9 @@ void amdgpu_amdkfd_free_gtt_mem(struct amdgpu_device *adev, void **mem_obj)
{
struct amdgpu_bo **bo = (struct amdgpu_bo **) mem_obj;
if (!bo || !*bo)
return;
(void)amdgpu_bo_reserve(*bo, true);
amdgpu_bo_kunmap(*bo);
amdgpu_bo_unpin(*bo);


@ -919,7 +919,7 @@ long amdgpu_ctx_mgr_entity_flush(struct amdgpu_ctx_mgr *mgr, long timeout)
return timeout;
}
void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr)
static void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr)
{
struct amdgpu_ctx *ctx;
struct idr *idp;
@ -949,19 +949,7 @@ void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr)
void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr)
{
struct amdgpu_ctx *ctx;
struct idr *idp;
uint32_t id;
amdgpu_ctx_mgr_entity_fini(mgr);
idp = &mgr->ctx_handles;
idr_for_each_entry(idp, ctx, id) {
if (kref_put(&ctx->refcount, amdgpu_ctx_fini) != 1)
DRM_ERROR("ctx %p is still alive\n", ctx);
}
idr_destroy(&mgr->ctx_handles);
mutex_destroy(&mgr->lock);
}


@ -92,7 +92,6 @@ int amdgpu_ctx_wait_prev_fence(struct amdgpu_ctx *ctx,
void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr,
struct amdgpu_device *adev);
void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr);
long amdgpu_ctx_mgr_entity_flush(struct amdgpu_ctx_mgr *mgr, long timeout);
void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr);
void amdgpu_ctx_mgr_usage(struct amdgpu_ctx_mgr *mgr,


@ -512,12 +512,13 @@ void amdgpu_device_detect_runtime_pm_mode(struct amdgpu_device *adev)
break;
case CHIP_VEGA10:
/* enable BACO as runpm mode if noretry=0 */
if (!adev->gmc.noretry)
if (!adev->gmc.noretry && !amdgpu_passthrough(adev))
adev->pm.rpm_mode = AMDGPU_RUNPM_BACO;
break;
default:
/* enable BACO as runpm mode on CI+ */
adev->pm.rpm_mode = AMDGPU_RUNPM_BACO;
if (!amdgpu_passthrough(adev))
adev->pm.rpm_mode = AMDGPU_RUNPM_BACO;
break;
}
@ -4728,7 +4729,7 @@ fence_driver_init:
amdgpu_fru_sysfs_init(adev);
amdgpu_reg_state_sysfs_init(adev);
amdgpu_xcp_cfg_sysfs_init(adev);
amdgpu_xcp_sysfs_init(adev);
if (IS_ENABLED(CONFIG_PERF_EVENTS))
r = amdgpu_pmu_init(adev);
@ -4858,7 +4859,7 @@ void amdgpu_device_fini_hw(struct amdgpu_device *adev)
amdgpu_fru_sysfs_fini(adev);
amdgpu_reg_state_sysfs_fini(adev);
amdgpu_xcp_cfg_sysfs_fini(adev);
amdgpu_xcp_sysfs_fini(adev);
/* disable ras feature must before hw fini */
amdgpu_ras_pre_fini(adev);


@ -270,9 +270,10 @@ static int amdgpu_discovery_read_binary_from_sysmem(struct amdgpu_device *adev,
static int amdgpu_discovery_read_binary_from_mem(struct amdgpu_device *adev,
uint8_t *binary)
{
bool sz_valid = true;
uint64_t vram_size;
u32 msg;
int i, ret = 0;
u32 msg;
if (!amdgpu_sriov_vf(adev)) {
/* It can take up to a second for IFWI init to complete on some dGPUs,
@ -291,9 +292,13 @@ static int amdgpu_discovery_read_binary_from_mem(struct amdgpu_device *adev,
}
}
vram_size = (uint64_t)RREG32(mmRCC_CONFIG_MEMSIZE) << 20;
vram_size = RREG32(mmRCC_CONFIG_MEMSIZE);
if (!vram_size || vram_size == U32_MAX)
sz_valid = false;
else
vram_size <<= 20;
if (vram_size) {
if (sz_valid) {
uint64_t pos = vram_size - DISCOVERY_TMR_OFFSET;
amdgpu_device_vram_access(adev, pos, (uint32_t *)binary,
adev->mman.discovery_tmr_size, false);
@ -301,6 +306,11 @@ static int amdgpu_discovery_read_binary_from_mem(struct amdgpu_device *adev,
ret = amdgpu_discovery_read_binary_from_sysmem(adev, binary);
}
if (ret)
dev_err(adev->dev,
"failed to read discovery info from memory, vram size read: %llx",
vram_size);
return ret;
}


@ -2913,8 +2913,8 @@ static int amdgpu_drm_release(struct inode *inode, struct file *filp)
if (fpriv) {
fpriv->evf_mgr.fd_closing = true;
amdgpu_userq_mgr_fini(&fpriv->userq_mgr);
amdgpu_eviction_fence_destroy(&fpriv->evf_mgr);
amdgpu_userq_mgr_fini(&fpriv->userq_mgr);
}
return drm_release(inode, filp);


@ -108,13 +108,22 @@ amdgpu_eviction_fence_suspend_worker(struct work_struct *work)
struct amdgpu_eviction_fence *ev_fence;
mutex_lock(&uq_mgr->userq_mutex);
spin_lock(&evf_mgr->ev_fence_lock);
ev_fence = evf_mgr->ev_fence;
if (!ev_fence)
if (ev_fence)
dma_fence_get(&ev_fence->base);
else
goto unlock;
spin_unlock(&evf_mgr->ev_fence_lock);
amdgpu_userq_evict(uq_mgr, ev_fence);
mutex_unlock(&uq_mgr->userq_mutex);
dma_fence_put(&ev_fence->base);
return;
unlock:
spin_unlock(&evf_mgr->ev_fence_lock);
mutex_unlock(&uq_mgr->userq_mutex);
}


@ -58,7 +58,7 @@ amdgpu_gem_add_input_fence(struct drm_file *filp,
return 0;
syncobj_handles = memdup_user(u64_to_user_ptr(syncobj_handles_array),
sizeof(uint32_t) * num_syncobj_handles);
size_mul(sizeof(uint32_t), num_syncobj_handles));
if (IS_ERR(syncobj_handles))
return PTR_ERR(syncobj_handles);


@ -2228,6 +2228,9 @@ void amdgpu_gfx_profile_ring_begin_use(struct amdgpu_ring *ring)
enum PP_SMC_POWER_PROFILE profile;
int r;
if (amdgpu_dpm_is_overdrive_enabled(adev))
return;
if (adev->gfx.num_gfx_rings)
profile = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
else
@ -2258,6 +2261,11 @@ void amdgpu_gfx_profile_ring_begin_use(struct amdgpu_ring *ring)
void amdgpu_gfx_profile_ring_end_use(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
if (amdgpu_dpm_is_overdrive_enabled(adev))
return;
atomic_dec(&ring->adev->gfx.total_submission_cnt);
schedule_delayed_work(&ring->adev->gfx.idle_work, GFX_PROFILE_IDLE_TIMEOUT);


@ -1502,11 +1502,6 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev,
amdgpu_bo_unreserve(pd);
}
if (!fpriv->evf_mgr.fd_closing) {
fpriv->evf_mgr.fd_closing = true;
amdgpu_userq_mgr_fini(&fpriv->userq_mgr);
amdgpu_eviction_fence_destroy(&fpriv->evf_mgr);
}
amdgpu_ctx_mgr_fini(&fpriv->ctx_mgr);
amdgpu_vm_fini(adev, &fpriv->vm);


@ -300,7 +300,9 @@ int amdgpu_mes_map_legacy_queue(struct amdgpu_device *adev,
queue_input.mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj);
queue_input.wptr_addr = ring->wptr_gpu_addr;
amdgpu_mes_lock(&adev->mes);
r = adev->mes.funcs->map_legacy_queue(&adev->mes, &queue_input);
amdgpu_mes_unlock(&adev->mes);
if (r)
DRM_ERROR("failed to map legacy queue\n");
@ -323,7 +325,9 @@ int amdgpu_mes_unmap_legacy_queue(struct amdgpu_device *adev,
queue_input.trail_fence_addr = gpu_addr;
queue_input.trail_fence_data = seq;
amdgpu_mes_lock(&adev->mes);
r = adev->mes.funcs->unmap_legacy_queue(&adev->mes, &queue_input);
amdgpu_mes_unlock(&adev->mes);
if (r)
DRM_ERROR("failed to unmap legacy queue\n");
@ -353,7 +357,9 @@ int amdgpu_mes_reset_legacy_queue(struct amdgpu_device *adev,
if (ring->funcs->type == AMDGPU_RING_TYPE_GFX)
queue_input.legacy_gfx = true;
amdgpu_mes_lock(&adev->mes);
r = adev->mes.funcs->reset_hw_queue(&adev->mes, &queue_input);
amdgpu_mes_unlock(&adev->mes);
if (r)
DRM_ERROR("failed to reset legacy queue\n");
@ -383,7 +389,9 @@ uint32_t amdgpu_mes_rreg(struct amdgpu_device *adev, uint32_t reg)
goto error;
}
amdgpu_mes_lock(&adev->mes);
r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
amdgpu_mes_unlock(&adev->mes);
if (r)
dev_err(adev->dev, "failed to read reg (0x%x)\n", reg);
else
@ -411,7 +419,9 @@ int amdgpu_mes_wreg(struct amdgpu_device *adev,
goto error;
}
amdgpu_mes_lock(&adev->mes);
r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
amdgpu_mes_unlock(&adev->mes);
if (r)
dev_err(adev->dev, "failed to write reg (0x%x)\n", reg);
@ -438,32 +448,9 @@ int amdgpu_mes_reg_write_reg_wait(struct amdgpu_device *adev,
goto error;
}
amdgpu_mes_lock(&adev->mes);
r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
if (r)
dev_err(adev->dev, "failed to reg_write_reg_wait\n");
error:
return r;
}
int amdgpu_mes_reg_wait(struct amdgpu_device *adev, uint32_t reg,
uint32_t val, uint32_t mask)
{
struct mes_misc_op_input op_input;
int r;
op_input.op = MES_MISC_OP_WRM_REG_WAIT;
op_input.wrm_reg.reg0 = reg;
op_input.wrm_reg.ref = val;
op_input.wrm_reg.mask = mask;
if (!adev->mes.funcs->misc_op) {
dev_err(adev->dev, "mes reg wait is not supported!\n");
r = -EINVAL;
goto error;
}
r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
amdgpu_mes_unlock(&adev->mes);
if (r)
dev_err(adev->dev, "failed to reg_write_reg_wait\n");
@ -539,42 +526,6 @@ int amdgpu_mes_flush_shader_debugger(struct amdgpu_device *adev,
return r;
}
#define DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(_eng) \
do { \
if (id_offs < AMDGPU_MES_CTX_MAX_OFFS) \
return offsetof(struct amdgpu_mes_ctx_meta_data, \
_eng[ring->idx].slots[id_offs]); \
else if (id_offs == AMDGPU_MES_CTX_RING_OFFS) \
return offsetof(struct amdgpu_mes_ctx_meta_data, \
_eng[ring->idx].ring); \
else if (id_offs == AMDGPU_MES_CTX_IB_OFFS) \
return offsetof(struct amdgpu_mes_ctx_meta_data, \
_eng[ring->idx].ib); \
else if (id_offs == AMDGPU_MES_CTX_PADDING_OFFS) \
return offsetof(struct amdgpu_mes_ctx_meta_data, \
_eng[ring->idx].padding); \
} while(0)
int amdgpu_mes_ctx_get_offs(struct amdgpu_ring *ring, unsigned int id_offs)
{
switch (ring->funcs->type) {
case AMDGPU_RING_TYPE_GFX:
DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(gfx);
break;
case AMDGPU_RING_TYPE_COMPUTE:
DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(compute);
break;
case AMDGPU_RING_TYPE_SDMA:
DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(sdma);
break;
default:
break;
}
WARN_ON(1);
return -EINVAL;
}
uint32_t amdgpu_mes_get_aggregated_doorbell_index(struct amdgpu_device *adev,
enum amdgpu_mes_priority_level prio)
{
@ -694,7 +645,9 @@ static int amdgpu_mes_set_enforce_isolation(struct amdgpu_device *adev,
goto error;
}
amdgpu_mes_lock(&adev->mes);
r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
amdgpu_mes_unlock(&adev->mes);
if (r)
dev_err(adev->dev, "failed to change_config.\n");


@ -372,8 +372,6 @@ struct amdgpu_mes_funcs {
#define amdgpu_mes_kiq_hw_init(adev) (adev)->mes.kiq_hw_init((adev))
#define amdgpu_mes_kiq_hw_fini(adev) (adev)->mes.kiq_hw_fini((adev))
int amdgpu_mes_ctx_get_offs(struct amdgpu_ring *ring, unsigned int id_offs);
int amdgpu_mes_init_microcode(struct amdgpu_device *adev, int pipe);
int amdgpu_mes_init(struct amdgpu_device *adev);
void amdgpu_mes_fini(struct amdgpu_device *adev);
@ -395,8 +393,6 @@ int amdgpu_mes_reset_legacy_queue(struct amdgpu_device *adev,
uint32_t amdgpu_mes_rreg(struct amdgpu_device *adev, uint32_t reg);
int amdgpu_mes_wreg(struct amdgpu_device *adev,
uint32_t reg, uint32_t val);
int amdgpu_mes_reg_wait(struct amdgpu_device *adev, uint32_t reg,
uint32_t val, uint32_t mask);
int amdgpu_mes_reg_write_reg_wait(struct amdgpu_device *adev,
uint32_t reg0, uint32_t reg1,
uint32_t ref, uint32_t mask);


@ -2859,6 +2859,15 @@ static int __amdgpu_ras_convert_rec_array_from_rom(struct amdgpu_device *adev,
return -EINVAL;
}
} else {
if (bps[0].address == 0) {
/* for specific old eeprom data, mca address is not stored,
* calc it from pa
*/
if (amdgpu_umc_pa2mca(adev, bps[0].retired_page << AMDGPU_GPU_PAGE_SHIFT,
&(bps[0].address), AMDGPU_NPS1_PARTITION_MODE))
return -EINVAL;
}
if (amdgpu_ras_mca2pa(adev, &bps[0], err_data)) {
if (nps == AMDGPU_NPS1_PARTITION_MODE)
memcpy(err_data->err_addr, bps,
@ -2886,8 +2895,20 @@ static int __amdgpu_ras_convert_rec_from_rom(struct amdgpu_device *adev,
bps->retired_page << AMDGPU_GPU_PAGE_SHIFT))
return -EINVAL;
} else {
if (amdgpu_ras_mca2pa_by_idx(adev, bps, err_data))
return -EINVAL;
if (bps->address) {
if (amdgpu_ras_mca2pa_by_idx(adev, bps, err_data))
return -EINVAL;
} else {
/* for specific old eeprom data, mca address is not stored,
* calc it from pa
*/
if (amdgpu_umc_pa2mca(adev, bps->retired_page << AMDGPU_GPU_PAGE_SHIFT,
&(bps->address), AMDGPU_NPS1_PARTITION_MODE))
return -EINVAL;
if (amdgpu_ras_mca2pa(adev, bps, err_data))
return -EOPNOTSUPP;
}
}
return __amdgpu_ras_restore_bad_pages(adev, err_data->err_addr,
@ -3708,7 +3729,8 @@ static void amdgpu_ras_query_ras_capablity_from_vbios(struct amdgpu_device *adev
*/
if (amdgpu_ip_version(adev, VCN_HWIP, 0) == IP_VERSION(2, 6, 0) ||
amdgpu_ip_version(adev, VCN_HWIP, 0) == IP_VERSION(4, 0, 0) ||
amdgpu_ip_version(adev, VCN_HWIP, 0) == IP_VERSION(4, 0, 3))
amdgpu_ip_version(adev, VCN_HWIP, 0) == IP_VERSION(4, 0, 3) ||
amdgpu_ip_version(adev, VCN_HWIP, 0) == IP_VERSION(5, 0, 1))
adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__VCN |
1 << AMDGPU_RAS_BLOCK__JPEG);
else


@ -113,6 +113,7 @@ struct amdgpu_sdma {
struct amdgpu_sdma_instance instance[AMDGPU_MAX_SDMA_INSTANCES];
struct amdgpu_irq_src trap_irq;
struct amdgpu_irq_src illegal_inst_irq;
struct amdgpu_irq_src fence_irq;
struct amdgpu_irq_src ecc_irq;
struct amdgpu_irq_src vm_hole_irq;
struct amdgpu_irq_src doorbell_invalid_irq;


@ -139,7 +139,7 @@ void amdgpu_seq64_unmap(struct amdgpu_device *adev, struct amdgpu_fpriv *fpriv)
vm = &fpriv->vm;
drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
drm_exec_init(&exec, 0, 0);
drm_exec_until_all_locked(&exec) {
r = amdgpu_vm_lock_pd(vm, &exec, 0);
if (likely(!r))


@ -765,6 +765,7 @@ FW_VERSION_ATTR(sdma_fw_version, 0444, sdma.instance[0].fw_version);
FW_VERSION_ATTR(sdma2_fw_version, 0444, sdma.instance[1].fw_version);
FW_VERSION_ATTR(vcn_fw_version, 0444, vcn.fw_version);
FW_VERSION_ATTR(dmcu_fw_version, 0444, dm.dmcu_fw_version);
FW_VERSION_ATTR(dmcub_fw_version, 0444, dm.dmcub_fw_version);
FW_VERSION_ATTR(mes_fw_version, 0444, mes.sched_version & AMDGPU_MES_VERSION_MASK);
FW_VERSION_ATTR(mes_kiq_fw_version, 0444, mes.kiq_version & AMDGPU_MES_VERSION_MASK);
FW_VERSION_ATTR(pldm_fw_version, 0444, firmware.pldm_version);
@ -780,9 +781,10 @@ static struct attribute *fw_attrs[] = {
&dev_attr_ta_ras_fw_version.attr, &dev_attr_ta_xgmi_fw_version.attr,
&dev_attr_smc_fw_version.attr, &dev_attr_sdma_fw_version.attr,
&dev_attr_sdma2_fw_version.attr, &dev_attr_vcn_fw_version.attr,
&dev_attr_dmcu_fw_version.attr, &dev_attr_imu_fw_version.attr,
&dev_attr_mes_fw_version.attr, &dev_attr_mes_kiq_fw_version.attr,
&dev_attr_pldm_fw_version.attr, NULL
&dev_attr_dmcu_fw_version.attr, &dev_attr_dmcub_fw_version.attr,
&dev_attr_imu_fw_version.attr, &dev_attr_mes_fw_version.attr,
&dev_attr_mes_kiq_fw_version.attr, &dev_attr_pldm_fw_version.attr,
NULL
};
#define to_dev_attr(x) container_of(x, struct device_attribute, attr)


@ -562,3 +562,26 @@ int amdgpu_umc_mca_to_addr(struct amdgpu_device *adev,
return 0;
}
int amdgpu_umc_pa2mca(struct amdgpu_device *adev,
uint64_t pa, uint64_t *mca, enum amdgpu_memory_partition nps)
{
struct ta_ras_query_address_input addr_in;
struct ta_ras_query_address_output addr_out;
int ret;
/* nps: the pa belongs to */
addr_in.pa.pa = pa | ((uint64_t)nps << 58);
addr_in.addr_type = TA_RAS_PA_TO_MCA;
ret = psp_ras_query_address(&adev->psp, &addr_in, &addr_out);
if (ret) {
dev_warn(adev->dev, "Failed to query RAS MCA address for 0x%llx",
pa);
return ret;
}
*mca = addr_out.ma.err_addr;
return 0;
}


@ -189,4 +189,6 @@ int amdgpu_umc_mca_to_addr(struct amdgpu_device *adev,
uint64_t err_addr, uint32_t ch, uint32_t umc,
uint32_t node, uint32_t socket,
struct ta_ras_query_address_output *addr_out, bool dump_addr);
int amdgpu_umc_pa2mca(struct amdgpu_device *adev,
uint64_t pa, uint64_t *mca, enum amdgpu_memory_partition nps);
#endif


@ -430,7 +430,7 @@ int amdgpu_userq_signal_ioctl(struct drm_device *dev, void *data,
num_syncobj_handles = args->num_syncobj_handles;
syncobj_handles = memdup_user(u64_to_user_ptr(args->syncobj_handles),
sizeof(u32) * num_syncobj_handles);
size_mul(sizeof(u32), num_syncobj_handles));
if (IS_ERR(syncobj_handles))
return PTR_ERR(syncobj_handles);
@ -612,13 +612,13 @@ int amdgpu_userq_wait_ioctl(struct drm_device *dev, void *data,
num_read_bo_handles = wait_info->num_bo_read_handles;
bo_handles_read = memdup_user(u64_to_user_ptr(wait_info->bo_read_handles),
sizeof(u32) * num_read_bo_handles);
size_mul(sizeof(u32), num_read_bo_handles));
if (IS_ERR(bo_handles_read))
return PTR_ERR(bo_handles_read);
num_write_bo_handles = wait_info->num_bo_write_handles;
bo_handles_write = memdup_user(u64_to_user_ptr(wait_info->bo_write_handles),
sizeof(u32) * num_write_bo_handles);
size_mul(sizeof(u32), num_write_bo_handles));
if (IS_ERR(bo_handles_write)) {
r = PTR_ERR(bo_handles_write);
goto free_bo_handles_read;
@ -626,7 +626,7 @@ int amdgpu_userq_wait_ioctl(struct drm_device *dev, void *data,
num_syncobj = wait_info->num_syncobj_handles;
syncobj_handles = memdup_user(u64_to_user_ptr(wait_info->syncobj_handles),
sizeof(u32) * num_syncobj);
size_mul(sizeof(u32), num_syncobj));
if (IS_ERR(syncobj_handles)) {
r = PTR_ERR(syncobj_handles);
goto free_bo_handles_write;


@ -463,7 +463,7 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
int r;
lpfn = (u64)place->lpfn << PAGE_SHIFT;
if (!lpfn)
if (!lpfn || lpfn > man->size)
lpfn = man->size;
fpfn = (u64)place->fpfn << PAGE_SHIFT;


@ -27,6 +27,9 @@
#include <drm/drm_drv.h>
#include "../amdxcp/amdgpu_xcp_drv.h"
static void amdgpu_xcp_sysfs_entries_init(struct amdgpu_xcp_mgr *xcp_mgr);
static void amdgpu_xcp_sysfs_entries_update(struct amdgpu_xcp_mgr *xcp_mgr);
static int __amdgpu_xcp_run(struct amdgpu_xcp_mgr *xcp_mgr,
struct amdgpu_xcp_ip *xcp_ip, int xcp_state)
{
@ -189,7 +192,7 @@ static int __amdgpu_xcp_switch_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr,
goto out;
}
amdgpu_xcp_sysfs_entries_update(xcp_mgr);
out:
mutex_unlock(&xcp_mgr->xcp_lock);
@ -263,9 +266,10 @@ static int amdgpu_xcp_dev_alloc(struct amdgpu_device *adev)
if (ret == -ENOSPC) {
dev_warn(adev->dev,
"Skip xcp node #%d when out of drm node resource.", i);
return 0;
ret = 0;
goto out;
} else if (ret) {
return ret;
goto out;
}
/* Redirect all IOCTLs to the primary device */
@ -278,9 +282,14 @@ static int amdgpu_xcp_dev_alloc(struct amdgpu_device *adev)
p_ddev->vma_offset_manager = ddev->vma_offset_manager;
p_ddev->driver = &amdgpu_partition_driver;
adev->xcp_mgr->xcp[i].ddev = p_ddev;
}
return 0;
dev_set_drvdata(p_ddev->dev, &adev->xcp_mgr->xcp[i]);
}
ret = 0;
out:
amdgpu_xcp_sysfs_entries_init(adev->xcp_mgr);
return ret;
}
int amdgpu_xcp_mgr_init(struct amdgpu_device *adev, int init_mode,
@ -288,6 +297,7 @@ int amdgpu_xcp_mgr_init(struct amdgpu_device *adev, int init_mode,
struct amdgpu_xcp_mgr_funcs *xcp_funcs)
{
struct amdgpu_xcp_mgr *xcp_mgr;
int i;
if (!xcp_funcs || !xcp_funcs->get_ip_details)
return -EINVAL;
@ -306,6 +316,8 @@ int amdgpu_xcp_mgr_init(struct amdgpu_device *adev, int init_mode,
amdgpu_xcp_init(xcp_mgr, init_num_xcps, init_mode);
adev->xcp_mgr = xcp_mgr;
for (i = 0; i < MAX_XCP; ++i)
xcp_mgr->xcp[i].xcp_mgr = xcp_mgr;
return amdgpu_xcp_dev_alloc(adev);
}
@ -433,6 +445,7 @@ void amdgpu_xcp_release_sched(struct amdgpu_device *adev,
}
}
/*====================== xcp sysfs - configuration ======================*/
#define XCP_CFG_SYSFS_RES_ATTR_SHOW(_name) \
static ssize_t amdgpu_xcp_res_sysfs_##_name##_show( \
struct amdgpu_xcp_res_details *xcp_res, char *buf) \
@ -635,7 +648,7 @@ static const struct attribute *xcp_attrs[] = {
NULL,
};
void amdgpu_xcp_cfg_sysfs_init(struct amdgpu_device *adev)
static void amdgpu_xcp_cfg_sysfs_init(struct amdgpu_device *adev)
{
struct amdgpu_xcp_res_details *xcp_res;
struct amdgpu_xcp_cfg *xcp_cfg;
@ -703,7 +716,7 @@ err1:
kobject_put(&xcp_cfg->kobj);
}
void amdgpu_xcp_cfg_sysfs_fini(struct amdgpu_device *adev)
static void amdgpu_xcp_cfg_sysfs_fini(struct amdgpu_device *adev)
{
struct amdgpu_xcp_res_details *xcp_res;
struct amdgpu_xcp_cfg *xcp_cfg;
@ -722,3 +735,124 @@ void amdgpu_xcp_cfg_sysfs_fini(struct amdgpu_device *adev)
sysfs_remove_files(&xcp_cfg->kobj, xcp_attrs);
kobject_put(&xcp_cfg->kobj);
}
/*====================== xcp sysfs - data entries ======================*/
#define to_xcp(x) container_of(x, struct amdgpu_xcp, kobj)
static ssize_t xcp_metrics_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
struct amdgpu_xcp *xcp = to_xcp(kobj);
struct amdgpu_xcp_mgr *xcp_mgr;
ssize_t size;
xcp_mgr = xcp->xcp_mgr;
size = amdgpu_dpm_get_xcp_metrics(xcp_mgr->adev, xcp->id, NULL);
if (size <= 0)
return size;
if (size > PAGE_SIZE)
return -ENOSPC;
return amdgpu_dpm_get_xcp_metrics(xcp_mgr->adev, xcp->id, buf);
}
static umode_t amdgpu_xcp_attrs_is_visible(struct kobject *kobj,
struct attribute *attr, int n)
{
struct amdgpu_xcp *xcp = to_xcp(kobj);
if (!xcp || !xcp->valid)
return 0;
return attr->mode;
}
static struct kobj_attribute xcp_sysfs_metrics = __ATTR_RO(xcp_metrics);
static struct attribute *amdgpu_xcp_attrs[] = {
&xcp_sysfs_metrics.attr,
NULL,
};
static const struct attribute_group amdgpu_xcp_attrs_group = {
.attrs = amdgpu_xcp_attrs,
.is_visible = amdgpu_xcp_attrs_is_visible
};
static const struct kobj_type xcp_sysfs_ktype = {
.sysfs_ops = &kobj_sysfs_ops,
};
static void amdgpu_xcp_sysfs_entries_fini(struct amdgpu_xcp_mgr *xcp_mgr, int n)
{
struct amdgpu_xcp *xcp;
for (n--; n >= 0; n--) {
xcp = &xcp_mgr->xcp[n];
if (!xcp->ddev || !xcp->valid)
continue;
sysfs_remove_group(&xcp->kobj, &amdgpu_xcp_attrs_group);
kobject_put(&xcp->kobj);
}
}
static void amdgpu_xcp_sysfs_entries_init(struct amdgpu_xcp_mgr *xcp_mgr)
{
struct amdgpu_xcp *xcp;
int i, r;
for (i = 0; i < MAX_XCP; i++) {
/* Redirect all IOCTLs to the primary device */
xcp = &xcp_mgr->xcp[i];
if (!xcp->ddev)
break;
r = kobject_init_and_add(&xcp->kobj, &xcp_sysfs_ktype,
&xcp->ddev->dev->kobj, "xcp");
if (r)
goto out;
r = sysfs_create_group(&xcp->kobj, &amdgpu_xcp_attrs_group);
if (r)
goto out;
}
return;
out:
kobject_put(&xcp->kobj);
}
static void amdgpu_xcp_sysfs_entries_update(struct amdgpu_xcp_mgr *xcp_mgr)
{
struct amdgpu_xcp *xcp;
int i;
for (i = 0; i < MAX_XCP; i++) {
/* Redirect all IOCTLs to the primary device */
xcp = &xcp_mgr->xcp[i];
if (!xcp->ddev)
continue;
sysfs_update_group(&xcp->kobj, &amdgpu_xcp_attrs_group);
}
return;
}
void amdgpu_xcp_sysfs_init(struct amdgpu_device *adev)
{
if (!adev->xcp_mgr)
return;
amdgpu_xcp_cfg_sysfs_init(adev);
return;
}
void amdgpu_xcp_sysfs_fini(struct amdgpu_device *adev)
{
if (!adev->xcp_mgr)
return;
amdgpu_xcp_sysfs_entries_fini(adev->xcp_mgr, MAX_XCP);
amdgpu_xcp_cfg_sysfs_fini(adev);
}


@ -108,6 +108,8 @@ struct amdgpu_xcp {
struct drm_driver *driver;
struct drm_vma_offset_manager *vma_offset_manager;
struct amdgpu_sched gpu_sched[AMDGPU_HW_IP_NUM][AMDGPU_RING_PRIO_MAX];
struct amdgpu_xcp_mgr *xcp_mgr;
struct kobject kobj;
};
struct amdgpu_xcp_mgr {
@ -175,8 +177,8 @@ int amdgpu_xcp_open_device(struct amdgpu_device *adev,
void amdgpu_xcp_release_sched(struct amdgpu_device *adev,
struct amdgpu_ctx_entity *entity);
void amdgpu_xcp_cfg_sysfs_init(struct amdgpu_device *adev);
void amdgpu_xcp_cfg_sysfs_fini(struct amdgpu_device *adev);
void amdgpu_xcp_sysfs_init(struct amdgpu_device *adev);
void amdgpu_xcp_sysfs_fini(struct amdgpu_device *adev);
#define amdgpu_xcp_select_scheds(adev, e, c, d, x, y) \
((adev)->xcp_mgr && (adev)->xcp_mgr->funcs && \


@ -294,6 +294,23 @@ static const struct amdgpu_pcs_ras_field xgmi3x16_pcs_ras_fields[] = {
SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, RxCMDPktErr)},
};
int amdgpu_xgmi_get_ext_link(struct amdgpu_device *adev, int link_num)
{
int link_map_6_4_x[8] = { 0, 3, 1, 2, 7, 6, 4, 5 };
switch (amdgpu_ip_version(adev, XGMI_HWIP, 0)) {
case IP_VERSION(6, 4, 0):
case IP_VERSION(6, 4, 1):
if (link_num < ARRAY_SIZE(link_map_6_4_x))
return link_map_6_4_x[link_num];
break;
default:
return -EINVAL;
}
return -EINVAL;
}
static u32 xgmi_v6_4_get_link_status(struct amdgpu_device *adev, int global_link_num)
{
const u32 smn_xgmi_6_4_pcs_state_hist1[2] = { 0x11a00070, 0x11b00070 };


@ -125,6 +125,7 @@ int amdgpu_xgmi_request_nps_change(struct amdgpu_device *adev,
int req_nps_mode);
int amdgpu_get_xgmi_link_status(struct amdgpu_device *adev,
int global_link_num);
int amdgpu_xgmi_get_ext_link(struct amdgpu_device *adev, int link_num);
void amdgpu_xgmi_early_init(struct amdgpu_device *adev);
uint32_t amdgpu_xgmi_get_max_bandwidth(struct amdgpu_device *adev);


@ -43,9 +43,9 @@ static const u32 gfx_10_1_10_cleaner_shader_hex[] = {
0xd70f6a01, 0x000202ff,
0x00000400, 0x80828102,
0xbf84fff7, 0xbefc03ff,
0x00000068, 0xbe803080,
0xbe813080, 0xbe823080,
0xbe833080, 0x80fc847c,
0x00000068, 0xbe803000,
0xbe813000, 0xbe823000,
0xbe833000, 0x80fc847c,
0xbf84fffa, 0xbeea0480,
0xbeec0480, 0xbeee0480,
0xbef00480, 0xbef20480,


@ -40,7 +40,6 @@ shader main
type(CS)
wave_size(32)
// Note: original source code from SQ team
//
// Create 32 waves in a threadgroup (CS waves)
// Each allocates 64 VGPRs
@ -71,8 +70,8 @@ label_0005:
s_sub_u32 s2, s2, 8
s_cbranch_scc0 label_0005
//
s_mov_b32 s2, 0x80000000 // Bit31 is first_wave
s_and_b32 s2, s2, s0 // sgpr0 has tg_size (first_wave) term as in ucode only COMPUTE_PGM_RSRC2.tg_size_en is set
s_mov_b32 s2, 0x80000000 // Bit31 is first_wave
s_and_b32 s2, s2, s1 // sgpr0 has tg_size (first_wave) term as in ucode only COMPUTE_PGM_RSRC2.tg_size_en is set
s_cbranch_scc0 label_0023 // Clean LDS if its first wave of ThreadGroup/WorkGroup
// CLEAR LDS
//
@ -99,10 +98,10 @@ label_001F:
label_0023:
s_mov_b32 m0, 0x00000068 // Loop 108/4=27 times (loop unrolled for performance)
label_sgpr_loop:
s_movreld_b32 s0, 0
s_movreld_b32 s1, 0
s_movreld_b32 s2, 0
s_movreld_b32 s3, 0
s_movreld_b32 s0, s0
s_movreld_b32 s1, s0
s_movreld_b32 s2, s0
s_movreld_b32 s3, s0
s_sub_u32 m0, m0, 4
s_cbranch_scc0 label_sgpr_loop


@ -36,7 +36,7 @@
#include "gc/gc_12_0_0_offset.h"
#include "gc/gc_12_0_0_sh_mask.h"
#include "soc24_enum.h"
#include "ivsrcid/gfx/irqsrcs_gfx_11_0_0.h"
#include "ivsrcid/gfx/irqsrcs_gfx_12_0_0.h"
#include "soc15.h"
#include "clearstate_gfx12.h"
@ -1453,28 +1453,28 @@ static int gfx_v12_0_sw_init(struct amdgpu_ip_block *ip_block)
/* EOP Event */
r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GRBM_CP,
GFX_11_0_0__SRCID__CP_EOP_INTERRUPT,
GFX_12_0_0__SRCID__CP_EOP_INTERRUPT,
&adev->gfx.eop_irq);
if (r)
return r;
/* Bad opcode Event */
r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GRBM_CP,
GFX_11_0_0__SRCID__CP_BAD_OPCODE_ERROR,
GFX_12_0_0__SRCID__CP_BAD_OPCODE_ERROR,
&adev->gfx.bad_op_irq);
if (r)
return r;
/* Privileged reg */
r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GRBM_CP,
GFX_11_0_0__SRCID__CP_PRIV_REG_FAULT,
GFX_12_0_0__SRCID__CP_PRIV_REG_FAULT,
&adev->gfx.priv_reg_irq);
if (r)
return r;
/* Privileged inst */
r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GRBM_CP,
GFX_11_0_0__SRCID__CP_PRIV_INSTR_FAULT,
GFX_12_0_0__SRCID__CP_PRIV_INSTR_FAULT,
&adev->gfx.priv_inst_irq);
if (r)
return r;


@ -25,6 +25,7 @@
*
*/
#include <linux/gpio/machine.h>
#include "amdgpu.h"
#include "isp_v4_1_1.h"
@ -39,15 +40,45 @@ static const unsigned int isp_4_1_1_int_srcid[MAX_ISP411_INT_SRC] = {
ISP_4_1__SRCID__ISP_RINGBUFFER_WPT16
};
static struct gpiod_lookup_table isp_gpio_table = {
.dev_id = "amd_isp_capture",
.table = {
GPIO_LOOKUP("AMDI0030:00", 85, "enable_isp", GPIO_ACTIVE_HIGH),
{ }
},
};
static struct gpiod_lookup_table isp_sensor_gpio_table = {
.dev_id = "i2c-ov05c10",
.table = {
GPIO_LOOKUP("amdisp-pinctrl", 0, "enable", GPIO_ACTIVE_HIGH),
{ }
},
};
static int isp_v4_1_1_hw_init(struct amdgpu_isp *isp)
{
struct amdgpu_device *adev = isp->adev;
int idx, int_idx, num_res, r;
u8 isp_dev_hid[ACPI_ID_LEN];
u64 isp_base;
if (adev->rmmio_size == 0 || adev->rmmio_size < 0x5289)
return -EINVAL;
r = amdgpu_acpi_get_isp4_dev_hid(&isp_dev_hid);
if (r) {
drm_dbg(&adev->ddev, "Invalid isp platform detected (%d)", r);
/* allow GPU init to progress */
return 0;
}
/* add GPIO resources required for OMNI5C10 sensor */
if (!strcmp("OMNI5C10", isp_dev_hid)) {
gpiod_add_lookup_table(&isp_gpio_table);
gpiod_add_lookup_table(&isp_sensor_gpio_table);
}
isp_base = adev->rmmio_base;
isp->isp_cell = kcalloc(3, sizeof(struct mfd_cell), GFP_KERNEL);


@ -149,6 +149,18 @@ static int jpeg_v4_0_3_sw_init(struct amdgpu_ip_block *ip_block)
return r;
}
/* JPEG DJPEG POISON EVENT */
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
VCN_4_0__SRCID_DJPEG0_POISON, &adev->jpeg.inst->ras_poison_irq);
if (r)
return r;
/* JPEG EJPEG POISON EVENT */
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
VCN_4_0__SRCID_EJPEG0_POISON, &adev->jpeg.inst->ras_poison_irq);
if (r)
return r;
r = amdgpu_jpeg_sw_init(adev);
if (r)
return r;
@ -434,6 +446,9 @@ static int jpeg_v4_0_3_hw_fini(struct amdgpu_ip_block *ip_block)
ret = jpeg_v4_0_3_set_powergating_state(ip_block, AMD_PG_STATE_GATE);
}
if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__JPEG))
amdgpu_irq_put(adev, &adev->jpeg.inst->ras_poison_irq, 0);
return ret;
}
@ -1041,6 +1056,14 @@ static int jpeg_v4_0_3_set_interrupt_state(struct amdgpu_device *adev,
return 0;
}
static int jpeg_v4_0_3_set_ras_interrupt_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
unsigned int type,
enum amdgpu_interrupt_state state)
{
return 0;
}
static int jpeg_v4_0_3_process_interrupt(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
@ -1200,6 +1223,11 @@ static const struct amdgpu_irq_src_funcs jpeg_v4_0_3_irq_funcs = {
.process = jpeg_v4_0_3_process_interrupt,
};
static const struct amdgpu_irq_src_funcs jpeg_v4_0_3_ras_irq_funcs = {
.set = jpeg_v4_0_3_set_ras_interrupt_state,
.process = amdgpu_jpeg_process_poison_irq,
};
static void jpeg_v4_0_3_set_irq_funcs(struct amdgpu_device *adev)
{
int i;
@ -1208,6 +1236,9 @@ static void jpeg_v4_0_3_set_irq_funcs(struct amdgpu_device *adev)
adev->jpeg.inst->irq.num_types += adev->jpeg.num_jpeg_rings;
}
adev->jpeg.inst->irq.funcs = &jpeg_v4_0_3_irq_funcs;
adev->jpeg.inst->ras_poison_irq.num_types = 1;
adev->jpeg.inst->ras_poison_irq.funcs = &jpeg_v4_0_3_ras_irq_funcs;
}
const struct amdgpu_ip_block_version jpeg_v4_0_3_ip_block = {
@ -1304,9 +1335,47 @@ static void jpeg_v4_0_3_reset_ras_error_count(struct amdgpu_device *adev)
jpeg_v4_0_3_inst_reset_ras_error_count(adev, i);
}
static uint32_t jpeg_v4_0_3_query_poison_by_instance(struct amdgpu_device *adev,
uint32_t instance, uint32_t sub_block)
{
uint32_t poison_stat = 0, reg_value = 0;
switch (sub_block) {
case AMDGPU_JPEG_V4_0_3_JPEG0:
reg_value = RREG32_SOC15(JPEG, instance, regUVD_RAS_JPEG0_STATUS);
poison_stat = REG_GET_FIELD(reg_value, UVD_RAS_JPEG0_STATUS, POISONED_PF);
break;
case AMDGPU_JPEG_V4_0_3_JPEG1:
reg_value = RREG32_SOC15(JPEG, instance, regUVD_RAS_JPEG1_STATUS);
poison_stat = REG_GET_FIELD(reg_value, UVD_RAS_JPEG1_STATUS, POISONED_PF);
break;
default:
break;
}
if (poison_stat)
dev_info(adev->dev, "Poison detected in JPEG%d sub_block%d\n",
instance, sub_block);
return poison_stat;
}
static bool jpeg_v4_0_3_query_ras_poison_status(struct amdgpu_device *adev)
{
uint32_t inst = 0, sub = 0, poison_stat = 0;
for (inst = 0; inst < adev->jpeg.num_jpeg_inst; inst++)
for (sub = 0; sub < AMDGPU_JPEG_V4_0_3_MAX_SUB_BLOCK; sub++)
poison_stat +=
jpeg_v4_0_3_query_poison_by_instance(adev, inst, sub);
return !!poison_stat;
}
static const struct amdgpu_ras_block_hw_ops jpeg_v4_0_3_ras_hw_ops = {
.query_ras_error_count = jpeg_v4_0_3_query_ras_error_count,
.reset_ras_error_count = jpeg_v4_0_3_reset_ras_error_count,
.query_poison_status = jpeg_v4_0_3_query_ras_poison_status,
};
static int jpeg_v4_0_3_aca_bank_parser(struct aca_handle *handle, struct aca_bank *bank,
@ -1383,6 +1452,13 @@ static int jpeg_v4_0_3_ras_late_init(struct amdgpu_device *adev, struct ras_comm
if (r)
return r;
if (amdgpu_ras_is_supported(adev, ras_block->block) &&
adev->jpeg.inst->ras_poison_irq.funcs) {
r = amdgpu_irq_get(adev, &adev->jpeg.inst->ras_poison_irq, 0);
if (r)
goto late_fini;
}
r = amdgpu_ras_bind_aca(adev, AMDGPU_RAS_BLOCK__JPEG,
&jpeg_v4_0_3_aca_info, NULL);
if (r)


@ -46,6 +46,13 @@
#define JRBC_DEC_EXTERNAL_REG_WRITE_ADDR 0x18000
enum amdgpu_jpeg_v4_0_3_sub_block {
AMDGPU_JPEG_V4_0_3_JPEG0 = 0,
AMDGPU_JPEG_V4_0_3_JPEG1,
AMDGPU_JPEG_V4_0_3_MAX_SUB_BLOCK,
};
extern const struct amdgpu_ip_block_version jpeg_v4_0_3_ip_block;
void jpeg_v4_0_3_dec_ring_emit_ib(struct amdgpu_ring *ring,


@ -39,6 +39,7 @@ static void jpeg_v5_0_1_set_dec_ring_funcs(struct amdgpu_device *adev);
static void jpeg_v5_0_1_set_irq_funcs(struct amdgpu_device *adev);
static int jpeg_v5_0_1_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state);
static void jpeg_v5_0_1_set_ras_funcs(struct amdgpu_device *adev);
static void jpeg_v5_0_1_dec_ring_set_wptr(struct amdgpu_ring *ring);
static int amdgpu_ih_srcid_jpeg[] = {
@ -120,6 +121,7 @@ static int jpeg_v5_0_1_early_init(struct amdgpu_ip_block *ip_block)
adev->jpeg.num_jpeg_rings = AMDGPU_MAX_JPEG_RINGS;
jpeg_v5_0_1_set_dec_ring_funcs(adev);
jpeg_v5_0_1_set_irq_funcs(adev);
jpeg_v5_0_1_set_ras_funcs(adev);
return 0;
}
@ -144,6 +146,17 @@ static int jpeg_v5_0_1_sw_init(struct amdgpu_ip_block *ip_block)
if (r)
return r;
}
/* JPEG DJPEG POISON EVENT */
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
VCN_5_0__SRCID_DJPEG0_POISON, &adev->jpeg.inst->ras_poison_irq);
if (r)
return r;
/* JPEG EJPEG POISON EVENT */
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
VCN_5_0__SRCID_EJPEG0_POISON, &adev->jpeg.inst->ras_poison_irq);
if (r)
return r;
r = amdgpu_jpeg_sw_init(adev);
if (r)
@ -296,6 +309,9 @@ static int jpeg_v5_0_1_hw_fini(struct amdgpu_ip_block *ip_block)
ret = jpeg_v5_0_1_set_powergating_state(ip_block, AMD_PG_STATE_GATE);
}
if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__JPEG))
amdgpu_irq_put(adev, &adev->jpeg.inst->ras_poison_irq, 0);
return ret;
}
@ -723,6 +739,16 @@ static int jpeg_v5_0_1_set_interrupt_state(struct amdgpu_device *adev,
return 0;
}
static int jpeg_v5_0_1_set_ras_interrupt_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
unsigned int type,
enum amdgpu_interrupt_state state)
{
return 0;
}
static int jpeg_v5_0_1_process_interrupt(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
@ -892,6 +918,11 @@ static const struct amdgpu_irq_src_funcs jpeg_v5_0_1_irq_funcs = {
.process = jpeg_v5_0_1_process_interrupt,
};
static const struct amdgpu_irq_src_funcs jpeg_v5_0_1_ras_irq_funcs = {
.set = jpeg_v5_0_1_set_ras_interrupt_state,
.process = amdgpu_jpeg_process_poison_irq,
};
static void jpeg_v5_0_1_set_irq_funcs(struct amdgpu_device *adev)
{
int i;
@ -900,6 +931,10 @@ static void jpeg_v5_0_1_set_irq_funcs(struct amdgpu_device *adev)
adev->jpeg.inst->irq.num_types += adev->jpeg.num_jpeg_rings;
adev->jpeg.inst->irq.funcs = &jpeg_v5_0_1_irq_funcs;
adev->jpeg.inst->ras_poison_irq.num_types = 1;
adev->jpeg.inst->ras_poison_irq.funcs = &jpeg_v5_0_1_ras_irq_funcs;
}
const struct amdgpu_ip_block_version jpeg_v5_0_1_ip_block = {
@ -909,3 +944,150 @@ const struct amdgpu_ip_block_version jpeg_v5_0_1_ip_block = {
.rev = 1,
.funcs = &jpeg_v5_0_1_ip_funcs,
};
static uint32_t jpeg_v5_0_1_query_poison_by_instance(struct amdgpu_device *adev,
uint32_t instance, uint32_t sub_block)
{
uint32_t poison_stat = 0, reg_value = 0;
switch (sub_block) {
case AMDGPU_JPEG_V5_0_1_JPEG0:
reg_value = RREG32_SOC15(JPEG, instance, regUVD_RAS_JPEG0_STATUS);
poison_stat = REG_GET_FIELD(reg_value, UVD_RAS_JPEG0_STATUS, POISONED_PF);
break;
case AMDGPU_JPEG_V5_0_1_JPEG1:
reg_value = RREG32_SOC15(JPEG, instance, regUVD_RAS_JPEG1_STATUS);
poison_stat = REG_GET_FIELD(reg_value, UVD_RAS_JPEG1_STATUS, POISONED_PF);
break;
default:
break;
}
if (poison_stat)
dev_info(adev->dev, "Poison detected in JPEG%d sub_block%d\n",
instance, sub_block);
return poison_stat;
}
static bool jpeg_v5_0_1_query_ras_poison_status(struct amdgpu_device *adev)
{
uint32_t inst = 0, sub = 0, poison_stat = 0;
for (inst = 0; inst < adev->jpeg.num_jpeg_inst; inst++)
for (sub = 0; sub < AMDGPU_JPEG_V5_0_1_MAX_SUB_BLOCK; sub++)
poison_stat +=
jpeg_v5_0_1_query_poison_by_instance(adev, inst, sub);
return !!poison_stat;
}
static const struct amdgpu_ras_block_hw_ops jpeg_v5_0_1_ras_hw_ops = {
.query_poison_status = jpeg_v5_0_1_query_ras_poison_status,
};
static int jpeg_v5_0_1_aca_bank_parser(struct aca_handle *handle, struct aca_bank *bank,
enum aca_smu_type type, void *data)
{
struct aca_bank_info info;
u64 misc0;
int ret;
ret = aca_bank_info_decode(bank, &info);
if (ret)
return ret;
misc0 = bank->regs[ACA_REG_IDX_MISC0];
switch (type) {
case ACA_SMU_TYPE_UE:
bank->aca_err_type = ACA_ERROR_TYPE_UE;
ret = aca_error_cache_log_bank_error(handle, &info, ACA_ERROR_TYPE_UE,
1ULL);
break;
case ACA_SMU_TYPE_CE:
bank->aca_err_type = ACA_ERROR_TYPE_CE;
ret = aca_error_cache_log_bank_error(handle, &info, bank->aca_err_type,
ACA_REG__MISC0__ERRCNT(misc0));
break;
default:
return -EINVAL;
}
return ret;
}
/* reference to smu driver if header file */
static int jpeg_v5_0_1_err_codes[] = {
16, 17, 18, 19, 20, 21, 22, 23, /* JPEG[0-7][S|D] */
24, 25, 26, 27, 28, 29, 30, 31
};
static bool jpeg_v5_0_1_aca_bank_is_valid(struct aca_handle *handle, struct aca_bank *bank,
enum aca_smu_type type, void *data)
{
u32 instlo;
instlo = ACA_REG__IPID__INSTANCEIDLO(bank->regs[ACA_REG_IDX_IPID]);
instlo &= GENMASK(31, 1);
if (instlo != mmSMNAID_AID0_MCA_SMU)
return false;
if (aca_bank_check_error_codes(handle->adev, bank,
jpeg_v5_0_1_err_codes,
ARRAY_SIZE(jpeg_v5_0_1_err_codes)))
return false;
return true;
}
static const struct aca_bank_ops jpeg_v5_0_1_aca_bank_ops = {
.aca_bank_parser = jpeg_v5_0_1_aca_bank_parser,
.aca_bank_is_valid = jpeg_v5_0_1_aca_bank_is_valid,
};
static const struct aca_info jpeg_v5_0_1_aca_info = {
.hwip = ACA_HWIP_TYPE_SMU,
.mask = ACA_ERROR_UE_MASK,
.bank_ops = &jpeg_v5_0_1_aca_bank_ops,
};
static int jpeg_v5_0_1_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block)
{
int r;
r = amdgpu_ras_block_late_init(adev, ras_block);
if (r)
return r;
if (amdgpu_ras_is_supported(adev, ras_block->block) &&
adev->jpeg.inst->ras_poison_irq.funcs) {
r = amdgpu_irq_get(adev, &adev->jpeg.inst->ras_poison_irq, 0);
if (r)
goto late_fini;
}
r = amdgpu_ras_bind_aca(adev, AMDGPU_RAS_BLOCK__JPEG,
&jpeg_v5_0_1_aca_info, NULL);
if (r)
goto late_fini;
return 0;
late_fini:
amdgpu_ras_block_late_fini(adev, ras_block);
return r;
}
static struct amdgpu_jpeg_ras jpeg_v5_0_1_ras = {
.ras_block = {
.hw_ops = &jpeg_v5_0_1_ras_hw_ops,
.ras_late_init = jpeg_v5_0_1_ras_late_init,
},
};
static void jpeg_v5_0_1_set_ras_funcs(struct amdgpu_device *adev)
{
adev->jpeg.ras = &jpeg_v5_0_1_ras;
}


@ -26,6 +26,9 @@
extern const struct amdgpu_ip_block_version jpeg_v5_0_1_ip_block;
#define regUVD_JRBC0_UVD_JRBC_SCRATCH0_INTERNAL_OFFSET 0x4094
#define regUVD_JRBC_EXTERNAL_MCM_ADDR_INTERNAL_OFFSET 0x1bffe
#define regUVD_JRBC0_UVD_JRBC_RB_WPTR 0x0640
#define regUVD_JRBC0_UVD_JRBC_RB_WPTR_BASE_IDX 1
#define regUVD_JRBC0_UVD_JRBC_STATUS 0x0649
@ -98,4 +101,11 @@ extern const struct amdgpu_ip_block_version jpeg_v5_0_1_ip_block;
#define regVCN_RRMT_CNTL 0x0940
#define regVCN_RRMT_CNTL_BASE_IDX 1
enum amdgpu_jpeg_v5_0_1_sub_block {
AMDGPU_JPEG_V5_0_1_JPEG0 = 0,
AMDGPU_JPEG_V5_0_1_JPEG1,
AMDGPU_JPEG_V5_0_1_MAX_SUB_BLOCK,
};
#endif /* __JPEG_V5_0_1_H__ */


@ -44,6 +44,7 @@
#include "sdma_v6_0.h"
#include "v11_structs.h"
#include "mes_userqueue.h"
#include "amdgpu_userq_fence.h"
MODULE_FIRMWARE("amdgpu/sdma_6_0_0.bin");
MODULE_FIRMWARE("amdgpu/sdma_6_0_1.bin");
@ -893,6 +894,9 @@ static int sdma_v6_0_mqd_init(struct amdgpu_device *adev, void *mqd,
m->sdmax_rlcx_csa_addr_lo = lower_32_bits(prop->csa_addr);
m->sdmax_rlcx_csa_addr_hi = upper_32_bits(prop->csa_addr);
m->sdmax_rlcx_f32_dbg0 = lower_32_bits(prop->fence_address);
m->sdmax_rlcx_f32_dbg1 = upper_32_bits(prop->fence_address);
return 0;
}
@ -1315,6 +1319,13 @@ static int sdma_v6_0_sw_init(struct amdgpu_ip_block *ip_block)
if (r)
return r;
/* SDMA user fence event */
r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GFX,
GFX_11_0_0__SRCID__SDMA_FENCE,
&adev->sdma.fence_irq);
if (r)
return r;
for (i = 0; i < adev->sdma.num_instances; i++) {
ring = &adev->sdma.instance[i].ring;
ring->ring_obj = NULL;
@ -1575,25 +1586,9 @@ static int sdma_v6_0_process_trap_irq(struct amdgpu_device *adev,
struct amdgpu_iv_entry *entry)
{
int instances, queue;
uint32_t mes_queue_id = entry->src_data[0];
DRM_DEBUG("IH: SDMA trap\n");
if (adev->enable_mes && (mes_queue_id & AMDGPU_FENCE_MES_QUEUE_FLAG)) {
struct amdgpu_mes_queue *queue;
mes_queue_id &= AMDGPU_FENCE_MES_QUEUE_ID_MASK;
spin_lock(&adev->mes.queue_id_lock);
queue = idr_find(&adev->mes.queue_id_idr, mes_queue_id);
if (queue) {
DRM_DEBUG("process smda queue id = %d\n", mes_queue_id);
amdgpu_fence_process(queue->ring);
}
spin_unlock(&adev->mes.queue_id_lock);
return 0;
}
queue = entry->ring_id & 0xf;
instances = (entry->ring_id & 0xf0) >> 4;
if (instances > 1) {
@ -1615,6 +1610,29 @@ static int sdma_v6_0_process_trap_irq(struct amdgpu_device *adev,
return 0;
}
static int sdma_v6_0_process_fence_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
u32 doorbell_offset = entry->src_data[0];
if (adev->enable_mes && doorbell_offset) {
struct amdgpu_userq_fence_driver *fence_drv = NULL;
struct xarray *xa = &adev->userq_xa;
unsigned long flags;
doorbell_offset >>= SDMA0_QUEUE0_DOORBELL_OFFSET__OFFSET__SHIFT;
xa_lock_irqsave(xa, flags);
fence_drv = xa_load(xa, doorbell_offset);
if (fence_drv)
amdgpu_userq_fence_driver_process(fence_drv);
xa_unlock_irqrestore(xa, flags);
}
return 0;
}
static int sdma_v6_0_process_illegal_inst_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
@ -1751,6 +1769,10 @@ static const struct amdgpu_irq_src_funcs sdma_v6_0_trap_irq_funcs = {
.process = sdma_v6_0_process_trap_irq,
};
static const struct amdgpu_irq_src_funcs sdma_v6_0_fence_irq_funcs = {
.process = sdma_v6_0_process_fence_irq,
};
static const struct amdgpu_irq_src_funcs sdma_v6_0_illegal_inst_irq_funcs = {
.process = sdma_v6_0_process_illegal_inst_irq,
};
@ -1760,6 +1782,7 @@ static void sdma_v6_0_set_irq_funcs(struct amdgpu_device *adev)
adev->sdma.trap_irq.num_types = AMDGPU_SDMA_IRQ_INSTANCE0 +
adev->sdma.num_instances;
adev->sdma.trap_irq.funcs = &sdma_v6_0_trap_irq_funcs;
adev->sdma.fence_irq.funcs = &sdma_v6_0_fence_irq_funcs;
adev->sdma.illegal_inst_irq.funcs = &sdma_v6_0_illegal_inst_irq_funcs;
}


@ -33,7 +33,7 @@
#include "gc/gc_12_0_0_offset.h"
#include "gc/gc_12_0_0_sh_mask.h"
#include "hdp/hdp_6_0_0_offset.h"
#include "ivsrcid/gfx/irqsrcs_gfx_11_0_0.h"
#include "ivsrcid/gfx/irqsrcs_gfx_12_0_0.h"
#include "soc15_common.h"
#include "soc15.h"
@ -43,6 +43,7 @@
#include "sdma_v7_0.h"
#include "v12_structs.h"
#include "mes_userqueue.h"
#include "amdgpu_userq_fence.h"
MODULE_FIRMWARE("amdgpu/sdma_7_0_0.bin");
MODULE_FIRMWARE("amdgpu/sdma_7_0_1.bin");
@ -910,6 +911,9 @@ static int sdma_v7_0_mqd_init(struct amdgpu_device *adev, void *mqd,
m->sdmax_rlcx_csa_addr_lo = lower_32_bits(prop->csa_addr);
m->sdmax_rlcx_csa_addr_hi = upper_32_bits(prop->csa_addr);
m->sdmax_rlcx_mcu_dbg0 = lower_32_bits(prop->fence_address);
m->sdmax_rlcx_mcu_dbg1 = upper_32_bits(prop->fence_address);
return 0;
}
@ -1296,11 +1300,18 @@ static int sdma_v7_0_sw_init(struct amdgpu_ip_block *ip_block)
/* SDMA trap event */
r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GFX,
GFX_11_0_0__SRCID__SDMA_TRAP,
GFX_12_0_0__SRCID__SDMA_TRAP,
&adev->sdma.trap_irq);
if (r)
return r;
/* SDMA user fence event */
r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GFX,
GFX_12_0_0__SRCID__SDMA_FENCE,
&adev->sdma.fence_irq);
if (r)
return r;
for (i = 0; i < adev->sdma.num_instances; i++) {
ring = &adev->sdma.instance[i].ring;
ring->ring_obj = NULL;
@ -1526,25 +1537,9 @@ static int sdma_v7_0_process_trap_irq(struct amdgpu_device *adev,
struct amdgpu_iv_entry *entry)
{
int instances, queue;
uint32_t mes_queue_id = entry->src_data[0];
DRM_DEBUG("IH: SDMA trap\n");
if (adev->enable_mes && (mes_queue_id & AMDGPU_FENCE_MES_QUEUE_FLAG)) {
struct amdgpu_mes_queue *queue;
mes_queue_id &= AMDGPU_FENCE_MES_QUEUE_ID_MASK;
spin_lock(&adev->mes.queue_id_lock);
queue = idr_find(&adev->mes.queue_id_idr, mes_queue_id);
if (queue) {
DRM_DEBUG("process smda queue id = %d\n", mes_queue_id);
amdgpu_fence_process(queue->ring);
}
spin_unlock(&adev->mes.queue_id_lock);
return 0;
}
queue = entry->ring_id & 0xf;
instances = (entry->ring_id & 0xf0) >> 4;
if (instances > 1) {
@ -1566,6 +1561,29 @@ static int sdma_v7_0_process_trap_irq(struct amdgpu_device *adev,
return 0;
}
static int sdma_v7_0_process_fence_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
u32 doorbell_offset = entry->src_data[0];
if (adev->enable_mes && doorbell_offset) {
struct amdgpu_userq_fence_driver *fence_drv = NULL;
struct xarray *xa = &adev->userq_xa;
unsigned long flags;
doorbell_offset >>= SDMA0_QUEUE0_DOORBELL_OFFSET__OFFSET__SHIFT;
xa_lock_irqsave(xa, flags);
fence_drv = xa_load(xa, doorbell_offset);
if (fence_drv)
amdgpu_userq_fence_driver_process(fence_drv);
xa_unlock_irqrestore(xa, flags);
}
return 0;
}
static int sdma_v7_0_process_illegal_inst_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
@ -1703,6 +1721,10 @@ static const struct amdgpu_irq_src_funcs sdma_v7_0_trap_irq_funcs = {
.process = sdma_v7_0_process_trap_irq,
};
static const struct amdgpu_irq_src_funcs sdma_v7_0_fence_irq_funcs = {
.process = sdma_v7_0_process_fence_irq,
};
static const struct amdgpu_irq_src_funcs sdma_v7_0_illegal_inst_irq_funcs = {
.process = sdma_v7_0_process_illegal_inst_irq,
};
@ -1712,6 +1734,7 @@ static void sdma_v7_0_set_irq_funcs(struct amdgpu_device *adev)
adev->sdma.trap_irq.num_types = AMDGPU_SDMA_IRQ_INSTANCE0 +
adev->sdma.num_instances;
adev->sdma.trap_irq.funcs = &sdma_v7_0_trap_irq_funcs;
adev->sdma.fence_irq.funcs = &sdma_v7_0_fence_irq_funcs;
adev->sdma.illegal_inst_irq.funcs = &sdma_v7_0_illegal_inst_irq_funcs;
}


@ -1009,6 +1009,11 @@ static int vcn_v1_0_start_spg_mode(struct amdgpu_vcn_inst *vinst)
jpeg_v1_0_start(adev, 0);
/* Keeping one read-back to ensure all register writes are done,
* otherwise it may introduce race conditions.
*/
RREG32_SOC15(UVD, 0, mmUVD_STATUS);
return 0;
}
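
This hunk, and the matching ones in the VCN 2.x through 5.x files below, all append the same idiom: one read of UVD_STATUS after the last register write of a start/stop path so posted MMIO writes are flushed before the function returns. A small stand-alone sketch of the idea is below; the register offset, helper names, and the plain array standing in for the MMIO window are made-up stand-ins, not the VCN register map.

/* Sketch of the read-back flush idiom; illustrative only. */
#include <stdint.h>
#include <stdio.h>

#define UVD_STATUS_OFFSET 0x10u        /* hypothetical offset, not the real map */

static inline void reg_write(volatile uint32_t *mmio, uint32_t off, uint32_t val)
{
	mmio[off / 4] = val;
}

static inline uint32_t reg_read(volatile uint32_t *mmio, uint32_t off)
{
	return mmio[off / 4];
}

static void stop_block(volatile uint32_t *mmio)
{
	reg_write(mmio, 0x0, 0);               /* last programming write of the path */

	/* Keep one read-back so the writes above are flushed (posted) before
	 * the caller goes on to, e.g., power-gate the block. */
	(void)reg_read(mmio, UVD_STATUS_OFFSET);
}

int main(void)
{
	uint32_t fake_mmio[16] = { 0 };        /* stands in for the VCN MMIO window */

	stop_block(fake_mmio);
	printf("reg0 = 0x%08X\n", fake_mmio[0]);
	return 0;
}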
@ -1154,6 +1159,11 @@ static int vcn_v1_0_start_dpg_mode(struct amdgpu_vcn_inst *vinst)
jpeg_v1_0_start(adev, 1);
/* Keeping one read-back to ensure all register writes are done,
* otherwise it may introduce race conditions.
*/
RREG32_SOC15(UVD, 0, mmUVD_STATUS);
return 0;
}
@ -1216,6 +1226,12 @@ static int vcn_v1_0_stop_spg_mode(struct amdgpu_vcn_inst *vinst)
vcn_v1_0_enable_clock_gating(vinst);
vcn_1_0_enable_static_power_gating(vinst);
/* Keeping one read-back to ensure all register writes are done,
* otherwise it may introduce race conditions.
*/
RREG32_SOC15(UVD, 0, mmUVD_STATUS);
return 0;
}
@ -1250,6 +1266,11 @@ static int vcn_v1_0_stop_dpg_mode(struct amdgpu_vcn_inst *vinst)
WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_POWER_STATUS), 0,
~UVD_POWER_STATUS__UVD_PG_MODE_MASK);
/* Keeping one read-back to ensure all register writes are done,
* otherwise it may introduce race conditions.
*/
RREG32_SOC15(UVD, 0, mmUVD_STATUS);
return 0;
}


@ -978,6 +978,12 @@ static int vcn_v2_0_start_dpg_mode(struct amdgpu_vcn_inst *vinst, bool indirect)
/* Unstall DPG */
WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_POWER_STATUS),
0, ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
/* Keeping one read-back to ensure all register writes are done,
* otherwise it may introduce race conditions.
*/
RREG32_SOC15(UVD, 0, mmUVD_STATUS);
return 0;
}
@ -1152,6 +1158,11 @@ static int vcn_v2_0_start(struct amdgpu_vcn_inst *vinst)
WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE2, ring->ring_size / 4);
fw_shared->multi_queue.encode_lowlatency_queue_mode &= ~FW_QUEUE_RING_RESET;
/* Keeping one read-back to ensure all register writes are done,
* otherwise it may introduce race conditions.
*/
RREG32_SOC15(UVD, 0, mmUVD_STATUS);
return 0;
}
@ -1183,6 +1194,11 @@ static int vcn_v2_0_stop_dpg_mode(struct amdgpu_vcn_inst *vinst)
WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_POWER_STATUS), 0,
~UVD_POWER_STATUS__UVD_PG_MODE_MASK);
/* Keeping one read-back to ensure all register writes are done,
* otherwise it may introduce race conditions.
*/
RREG32_SOC15(UVD, 0, mmUVD_STATUS);
return 0;
}
@ -1248,6 +1264,11 @@ static int vcn_v2_0_stop(struct amdgpu_vcn_inst *vinst)
vcn_v2_0_enable_clock_gating(vinst);
vcn_v2_0_enable_static_power_gating(vinst);
/* Keeping one read-back to ensure all register writes are done,
* otherwise it may introduce race conditions.
*/
RREG32_SOC15(VCN, 0, mmUVD_STATUS);
power_off:
if (adev->pm.dpm_enabled)
amdgpu_dpm_enable_vcn(adev, false, 0);


@ -1158,6 +1158,11 @@ static int vcn_v2_5_start_dpg_mode(struct amdgpu_vcn_inst *vinst, bool indirect)
WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS),
0, ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
/* Keeping one read-back to ensure all register writes are done,
* otherwise it may introduce race conditions.
*/
RREG32_SOC15(VCN, inst_idx, mmUVD_STATUS);
return 0;
}
@ -1343,6 +1348,11 @@ static int vcn_v2_5_start(struct amdgpu_vcn_inst *vinst)
WREG32_SOC15(VCN, i, mmUVD_RB_SIZE2, ring->ring_size / 4);
fw_shared->multi_queue.encode_lowlatency_queue_mode &= ~FW_QUEUE_RING_RESET;
/* Keeping one read-back to ensure all register writes are done,
* otherwise it may introduce race conditions.
*/
RREG32_SOC15(VCN, i, mmUVD_STATUS);
return 0;
}
@ -1569,6 +1579,11 @@ static int vcn_v2_5_stop_dpg_mode(struct amdgpu_vcn_inst *vinst)
WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS), 0,
~UVD_POWER_STATUS__UVD_PG_MODE_MASK);
/* Keeping one read-back to ensure all register writes are done,
* otherwise it may introduce race conditions.
*/
RREG32_SOC15(VCN, inst_idx, mmUVD_STATUS);
return 0;
}
@ -1635,6 +1650,10 @@ static int vcn_v2_5_stop(struct amdgpu_vcn_inst *vinst)
UVD_POWER_STATUS__UVD_POWER_STATUS_MASK,
~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
/* Keeping one read-back to ensure all register writes are done,
* otherwise it may introduce race conditions.
*/
RREG32_SOC15(VCN, i, mmUVD_STATUS);
done:
if (adev->pm.dpm_enabled)
amdgpu_dpm_enable_vcn(adev, false, i);


@ -1173,6 +1173,11 @@ static int vcn_v3_0_start_dpg_mode(struct amdgpu_vcn_inst *vinst, bool indirect)
WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS),
0, ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
/* Keeping one read-back to ensure all register writes are done,
* otherwise it may introduce race conditions.
*/
RREG32_SOC15(VCN, inst_idx, mmUVD_STATUS);
return 0;
}
@ -1360,6 +1365,11 @@ static int vcn_v3_0_start(struct amdgpu_vcn_inst *vinst)
fw_shared->multi_queue.encode_lowlatency_queue_mode &= cpu_to_le32(~FW_QUEUE_RING_RESET);
}
/* Keeping one read-back to ensure all register writes are done,
* otherwise it may introduce race conditions.
*/
RREG32_SOC15(VCN, i, mmUVD_STATUS);
return 0;
}
@ -1602,6 +1612,11 @@ static int vcn_v3_0_stop_dpg_mode(struct amdgpu_vcn_inst *vinst)
WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS), 0,
~UVD_POWER_STATUS__UVD_PG_MODE_MASK);
/* Keeping one read-back to ensure all register writes are done,
* otherwise it may introduce race conditions.
*/
RREG32_SOC15(VCN, inst_idx, mmUVD_STATUS);
return 0;
}
@ -1674,6 +1689,11 @@ static int vcn_v3_0_stop(struct amdgpu_vcn_inst *vinst)
/* enable VCN power gating */
vcn_v3_0_enable_static_power_gating(vinst);
/* Keeping one read-back to ensure all register writes are done,
* otherwise it may introduce race conditions.
*/
RREG32_SOC15(VCN, i, mmUVD_STATUS);
done:
if (adev->pm.dpm_enabled)
amdgpu_dpm_enable_vcn(adev, false, i);


@ -1122,6 +1122,11 @@ static int vcn_v4_0_start_dpg_mode(struct amdgpu_vcn_inst *vinst, bool indirect)
ring->doorbell_index << VCN_RB1_DB_CTRL__OFFSET__SHIFT |
VCN_RB1_DB_CTRL__EN_MASK);
/* Keeping one read-back to ensure all register writes are done,
* otherwise it may introduce race conditions.
*/
RREG32_SOC15(VCN, inst_idx, regUVD_STATUS);
return 0;
}
@ -1303,6 +1308,11 @@ static int vcn_v4_0_start(struct amdgpu_vcn_inst *vinst)
WREG32_SOC15(VCN, i, regVCN_RB_ENABLE, tmp);
fw_shared->sq.queue_mode &= ~(FW_QUEUE_RING_RESET | FW_QUEUE_DPG_HOLD_OFF);
/* Keeping one read-back to ensure all register writes are done,
* otherwise it may introduce race conditions.
*/
RREG32_SOC15(VCN, i, regUVD_STATUS);
return 0;
}
@ -1583,6 +1593,11 @@ static void vcn_v4_0_stop_dpg_mode(struct amdgpu_vcn_inst *vinst)
/* disable dynamic power gating mode */
WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, regUVD_POWER_STATUS), 0,
~UVD_POWER_STATUS__UVD_PG_MODE_MASK);
/* Keeping one read-back to ensure all register writes are done,
* otherwise it may introduce race conditions.
*/
RREG32_SOC15(VCN, inst_idx, regUVD_STATUS);
}
/**
@ -1666,6 +1681,11 @@ static int vcn_v4_0_stop(struct amdgpu_vcn_inst *vinst)
/* enable VCN power gating */
vcn_v4_0_enable_static_power_gating(vinst);
/* Keeping one read-back to ensure all register writes are done,
* otherwise it may introduce race conditions.
*/
RREG32_SOC15(VCN, i, regUVD_STATUS);
done:
if (adev->pm.dpm_enabled)
amdgpu_dpm_enable_vcn(adev, false, i);


@ -169,6 +169,10 @@ static int vcn_v4_0_3_sw_init(struct amdgpu_ip_block *ip_block)
if (r)
return r;
/* VCN POISON TRAP */
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
VCN_4_0__SRCID_UVD_POISON, &adev->vcn.inst->ras_poison_irq);
for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
r = amdgpu_vcn_sw_init(adev, i);
@ -387,6 +391,9 @@ static int vcn_v4_0_3_hw_fini(struct amdgpu_ip_block *ip_block)
vinst->set_pg_state(vinst, AMD_PG_STATE_GATE);
}
if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__VCN))
amdgpu_irq_put(adev, &adev->vcn.inst->ras_poison_irq, 0);
return 0;
}
@ -970,6 +977,11 @@ static int vcn_v4_0_3_start_dpg_mode(struct amdgpu_vcn_inst *vinst,
/*resetting done, fw can check RB ring */
fw_shared->sq.queue_mode &= cpu_to_le32(~FW_QUEUE_RING_RESET);
/* Keeping one read-back to ensure all register writes are done,
* otherwise it may introduce race conditions.
*/
RREG32_SOC15(VCN, vcn_inst, regUVD_STATUS);
return 0;
}
@ -1363,6 +1375,12 @@ static int vcn_v4_0_3_stop_dpg_mode(struct amdgpu_vcn_inst *vinst)
/* disable dynamic power gating mode */
WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_POWER_STATUS), 0,
~UVD_POWER_STATUS__UVD_PG_MODE_MASK);
/* Keeping one read-back to ensure all register writes are done,
* otherwise it may introduce race conditions.
*/
RREG32_SOC15(VCN, vcn_inst, regUVD_STATUS);
return 0;
}
@ -1446,6 +1464,11 @@ static int vcn_v4_0_3_stop(struct amdgpu_vcn_inst *vinst)
/* apply HW clock gating */
vcn_v4_0_3_enable_clock_gating(vinst);
/* Keeping one read-back to ensure all register writes are done,
* otherwise it may introduce race conditions.
*/
RREG32_SOC15(VCN, vcn_inst, regUVD_STATUS);
Done:
return 0;
}
@ -1814,11 +1837,24 @@ static int vcn_v4_0_3_process_interrupt(struct amdgpu_device *adev,
return 0;
}
static int vcn_v4_0_3_set_ras_interrupt_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
unsigned int type,
enum amdgpu_interrupt_state state)
{
return 0;
}
static const struct amdgpu_irq_src_funcs vcn_v4_0_3_irq_funcs = {
.set = vcn_v4_0_3_set_interrupt_state,
.process = vcn_v4_0_3_process_interrupt,
};
static const struct amdgpu_irq_src_funcs vcn_v4_0_3_ras_irq_funcs = {
.set = vcn_v4_0_3_set_ras_interrupt_state,
.process = amdgpu_vcn_process_poison_irq,
};
/**
* vcn_v4_0_3_set_irq_funcs - set VCN block interrupt irq functions
*
@ -1834,6 +1870,9 @@ static void vcn_v4_0_3_set_irq_funcs(struct amdgpu_device *adev)
adev->vcn.inst->irq.num_types++;
}
adev->vcn.inst->irq.funcs = &vcn_v4_0_3_irq_funcs;
adev->vcn.inst->ras_poison_irq.num_types = 1;
adev->vcn.inst->ras_poison_irq.funcs = &vcn_v4_0_3_ras_irq_funcs;
}
static void vcn_v4_0_3_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p)
@ -1981,9 +2020,44 @@ static void vcn_v4_0_3_reset_ras_error_count(struct amdgpu_device *adev)
vcn_v4_0_3_inst_reset_ras_error_count(adev, i);
}
static uint32_t vcn_v4_0_3_query_poison_by_instance(struct amdgpu_device *adev,
uint32_t instance, uint32_t sub_block)
{
uint32_t poison_stat = 0, reg_value = 0;
switch (sub_block) {
case AMDGPU_VCN_V4_0_3_VCPU_VCODEC:
reg_value = RREG32_SOC15(VCN, instance, regUVD_RAS_VCPU_VCODEC_STATUS);
poison_stat = REG_GET_FIELD(reg_value, UVD_RAS_VCPU_VCODEC_STATUS, POISONED_PF);
break;
default:
break;
}
if (poison_stat)
dev_info(adev->dev, "Poison detected in VCN%d, sub_block%d\n",
instance, sub_block);
return poison_stat;
}
static bool vcn_v4_0_3_query_poison_status(struct amdgpu_device *adev)
{
uint32_t inst, sub;
uint32_t poison_stat = 0;
for (inst = 0; inst < adev->vcn.num_vcn_inst; inst++)
for (sub = 0; sub < AMDGPU_VCN_V4_0_3_MAX_SUB_BLOCK; sub++)
poison_stat +=
vcn_v4_0_3_query_poison_by_instance(adev, inst, sub);
return !!poison_stat;
}
static const struct amdgpu_ras_block_hw_ops vcn_v4_0_3_ras_hw_ops = {
.query_ras_error_count = vcn_v4_0_3_query_ras_error_count,
.reset_ras_error_count = vcn_v4_0_3_reset_ras_error_count,
.query_poison_status = vcn_v4_0_3_query_poison_status,
};
static int vcn_v4_0_3_aca_bank_parser(struct aca_handle *handle, struct aca_bank *bank,
@ -2059,6 +2133,13 @@ static int vcn_v4_0_3_ras_late_init(struct amdgpu_device *adev, struct ras_commo
if (r)
return r;
if (amdgpu_ras_is_supported(adev, ras_block->block) &&
adev->vcn.inst->ras_poison_irq.funcs) {
r = amdgpu_irq_get(adev, &adev->vcn.inst->ras_poison_irq, 0);
if (r)
goto late_fini;
}
r = amdgpu_ras_bind_aca(adev, AMDGPU_RAS_BLOCK__VCN,
&vcn_v4_0_3_aca_info, NULL);
if (r)


@ -24,6 +24,12 @@
#ifndef __VCN_V4_0_3_H__
#define __VCN_V4_0_3_H__
enum amdgpu_vcn_v4_0_3_sub_block {
AMDGPU_VCN_V4_0_3_VCPU_VCODEC = 0,
AMDGPU_VCN_V4_0_3_MAX_SUB_BLOCK,
};
extern const struct amdgpu_ip_block_version vcn_v4_0_3_ip_block;
void vcn_v4_0_3_enc_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,


@ -1254,6 +1254,11 @@ static void vcn_v4_0_5_stop_dpg_mode(struct amdgpu_vcn_inst *vinst)
/* disable dynamic power gating mode */
WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, regUVD_POWER_STATUS), 0,
~UVD_POWER_STATUS__UVD_PG_MODE_MASK);
/* Keeping one read-back to ensure all register writes are done,
* otherwise it may introduce race conditions.
*/
RREG32_SOC15(VCN, inst_idx, regUVD_STATUS);
}
/**
@ -1337,6 +1342,11 @@ static int vcn_v4_0_5_stop(struct amdgpu_vcn_inst *vinst)
/* enable VCN power gating */
vcn_v4_0_5_enable_static_power_gating(vinst);
/* Keeping one read-back to ensure all register writes are done,
* otherwise it may introduce race conditions.
*/
RREG32_SOC15(VCN, i, regUVD_STATUS);
done:
if (adev->pm.dpm_enabled)
amdgpu_dpm_enable_vcn(adev, false, i);


@ -794,6 +794,11 @@ static int vcn_v5_0_0_start_dpg_mode(struct amdgpu_vcn_inst *vinst,
ring->doorbell_index << VCN_RB1_DB_CTRL__OFFSET__SHIFT |
VCN_RB1_DB_CTRL__EN_MASK);
/* Keeping one read-back to ensure all register writes are done,
* otherwise it may introduce race conditions.
*/
RREG32_SOC15(VCN, inst_idx, regUVD_STATUS);
return 0;
}
@ -946,6 +951,11 @@ static int vcn_v5_0_0_start(struct amdgpu_vcn_inst *vinst)
WREG32_SOC15(VCN, i, regVCN_RB_ENABLE, tmp);
fw_shared->sq.queue_mode &= ~(FW_QUEUE_RING_RESET | FW_QUEUE_DPG_HOLD_OFF);
/* Keeping one read-back to ensure all register writes are done,
* otherwise it may introduce race conditions.
*/
RREG32_SOC15(VCN, i, regUVD_STATUS);
return 0;
}
@ -977,6 +987,11 @@ static void vcn_v5_0_0_stop_dpg_mode(struct amdgpu_vcn_inst *vinst)
WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, regUVD_POWER_STATUS), 0,
~UVD_POWER_STATUS__UVD_PG_MODE_MASK);
/* Keeping one read-back to ensure all register writes are done,
* otherwise it may introduce race conditions.
*/
RREG32_SOC15(VCN, inst_idx, regUVD_STATUS);
return;
}
@ -1058,6 +1073,11 @@ static int vcn_v5_0_0_stop(struct amdgpu_vcn_inst *vinst)
/* enable VCN power gating */
vcn_v5_0_0_enable_static_power_gating(vinst);
/* Keeping one read-back to ensure all register writes are done,
* otherwise it may introduce race conditions.
*/
RREG32_SOC15(VCN, i, regUVD_STATUS);
done:
if (adev->pm.dpm_enabled)
amdgpu_dpm_enable_vcn(adev, false, i);


@ -46,7 +46,7 @@ static void vcn_v5_0_1_set_irq_funcs(struct amdgpu_device *adev);
static int vcn_v5_0_1_set_pg_state(struct amdgpu_vcn_inst *vinst,
enum amd_powergating_state state);
static void vcn_v5_0_1_unified_ring_set_wptr(struct amdgpu_ring *ring);
static void vcn_v5_0_1_set_ras_funcs(struct amdgpu_device *adev);
/**
* vcn_v5_0_1_early_init - set function pointers and load microcode
*
@ -66,6 +66,7 @@ static int vcn_v5_0_1_early_init(struct amdgpu_ip_block *ip_block)
vcn_v5_0_1_set_unified_ring_funcs(adev);
vcn_v5_0_1_set_irq_funcs(adev);
vcn_v5_0_1_set_ras_funcs(adev);
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
adev->vcn.inst[i].set_pg_state = vcn_v5_0_1_set_pg_state;
@ -113,6 +114,10 @@ static int vcn_v5_0_1_sw_init(struct amdgpu_ip_block *ip_block)
if (r)
return r;
/* VCN POISON TRAP */
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
VCN_5_0__SRCID_UVD_POISON, &adev->vcn.inst->ras_poison_irq);
for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
vcn_inst = GET_INST(VCN, i);
@ -279,6 +284,9 @@ static int vcn_v5_0_1_hw_fini(struct amdgpu_ip_block *ip_block)
vinst->set_pg_state(vinst, AMD_PG_STATE_GATE);
}
if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__VCN))
amdgpu_irq_put(adev, &adev->vcn.inst->ras_poison_irq, 0);
return 0;
}
@ -1030,6 +1038,11 @@ static int vcn_v5_0_1_start(struct amdgpu_vcn_inst *vinst)
WREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE, tmp);
fw_shared->sq.queue_mode &= ~(FW_QUEUE_RING_RESET | FW_QUEUE_DPG_HOLD_OFF);
/* Keeping one read-back to ensure all register writes are done,
* otherwise it may introduce race conditions.
*/
RREG32_SOC15(VCN, vcn_inst, regUVD_STATUS);
return 0;
}
@ -1064,6 +1077,11 @@ static void vcn_v5_0_1_stop_dpg_mode(struct amdgpu_vcn_inst *vinst)
/* disable dynamic power gating mode */
WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_POWER_STATUS), 0,
~UVD_POWER_STATUS__UVD_PG_MODE_MASK);
/* Keeping one read-back to ensure all register writes are done,
* otherwise it may introduce race conditions.
*/
RREG32_SOC15(VCN, vcn_inst, regUVD_STATUS);
}
/**
@ -1139,6 +1157,11 @@ static int vcn_v5_0_1_stop(struct amdgpu_vcn_inst *vinst)
/* clear status */
WREG32_SOC15(VCN, vcn_inst, regUVD_STATUS, 0);
/* Keeping one read-back to ensure all register writes are done,
* otherwise it may introduce race conditions.
*/
RREG32_SOC15(VCN, vcn_inst, regUVD_STATUS);
return 0;
}
@ -1391,10 +1414,24 @@ static int vcn_v5_0_1_process_interrupt(struct amdgpu_device *adev, struct amdgp
return 0;
}
static int vcn_v5_0_1_set_ras_interrupt_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
unsigned int type,
enum amdgpu_interrupt_state state)
{
return 0;
}
static const struct amdgpu_irq_src_funcs vcn_v5_0_1_irq_funcs = {
.process = vcn_v5_0_1_process_interrupt,
};
static const struct amdgpu_irq_src_funcs vcn_v5_0_1_ras_irq_funcs = {
.set = vcn_v5_0_1_set_ras_interrupt_state,
.process = amdgpu_vcn_process_poison_irq,
};
/**
* vcn_v5_0_1_set_irq_funcs - set VCN block interrupt irq functions
*
@ -1408,7 +1445,12 @@ static void vcn_v5_0_1_set_irq_funcs(struct amdgpu_device *adev)
for (i = 0; i < adev->vcn.num_vcn_inst; ++i)
adev->vcn.inst->irq.num_types++;
adev->vcn.inst->irq.funcs = &vcn_v5_0_1_irq_funcs;
adev->vcn.inst->ras_poison_irq.num_types = 1;
adev->vcn.inst->ras_poison_irq.funcs = &vcn_v5_0_1_ras_irq_funcs;
}
static const struct amd_ip_funcs vcn_v5_0_1_ip_funcs = {
@ -1440,3 +1482,139 @@ const struct amdgpu_ip_block_version vcn_v5_0_1_ip_block = {
.rev = 1,
.funcs = &vcn_v5_0_1_ip_funcs,
};
static uint32_t vcn_v5_0_1_query_poison_by_instance(struct amdgpu_device *adev,
uint32_t instance, uint32_t sub_block)
{
uint32_t poison_stat = 0, reg_value = 0;
switch (sub_block) {
case AMDGPU_VCN_V5_0_1_VCPU_VCODEC:
reg_value = RREG32_SOC15(VCN, instance, regUVD_RAS_VCPU_VCODEC_STATUS);
poison_stat = REG_GET_FIELD(reg_value, UVD_RAS_VCPU_VCODEC_STATUS, POISONED_PF);
break;
default:
break;
}
if (poison_stat)
dev_info(adev->dev, "Poison detected in VCN%d, sub_block%d\n",
instance, sub_block);
return poison_stat;
}
static bool vcn_v5_0_1_query_poison_status(struct amdgpu_device *adev)
{
uint32_t inst, sub;
uint32_t poison_stat = 0;
for (inst = 0; inst < adev->vcn.num_vcn_inst; inst++)
for (sub = 0; sub < AMDGPU_VCN_V5_0_1_MAX_SUB_BLOCK; sub++)
poison_stat +=
vcn_v5_0_1_query_poison_by_instance(adev, inst, sub);
return !!poison_stat;
}
static const struct amdgpu_ras_block_hw_ops vcn_v5_0_1_ras_hw_ops = {
.query_poison_status = vcn_v5_0_1_query_poison_status,
};
static int vcn_v5_0_1_aca_bank_parser(struct aca_handle *handle, struct aca_bank *bank,
enum aca_smu_type type, void *data)
{
struct aca_bank_info info;
u64 misc0;
int ret;
ret = aca_bank_info_decode(bank, &info);
if (ret)
return ret;
misc0 = bank->regs[ACA_REG_IDX_MISC0];
switch (type) {
case ACA_SMU_TYPE_UE:
bank->aca_err_type = ACA_ERROR_TYPE_UE;
ret = aca_error_cache_log_bank_error(handle, &info, ACA_ERROR_TYPE_UE,
1ULL);
break;
case ACA_SMU_TYPE_CE:
bank->aca_err_type = ACA_ERROR_TYPE_CE;
ret = aca_error_cache_log_bank_error(handle, &info, bank->aca_err_type,
ACA_REG__MISC0__ERRCNT(misc0));
break;
default:
return -EINVAL;
}
return ret;
}
/* reference to smu driver if header file */
static int vcn_v5_0_1_err_codes[] = {
14, 15, /* VCN */
};
static bool vcn_v5_0_1_aca_bank_is_valid(struct aca_handle *handle, struct aca_bank *bank,
enum aca_smu_type type, void *data)
{
u32 instlo;
instlo = ACA_REG__IPID__INSTANCEIDLO(bank->regs[ACA_REG_IDX_IPID]);
instlo &= GENMASK(31, 1);
if (instlo != mmSMNAID_AID0_MCA_SMU)
return false;
if (aca_bank_check_error_codes(handle->adev, bank,
vcn_v5_0_1_err_codes,
ARRAY_SIZE(vcn_v5_0_1_err_codes)))
return false;
return true;
}
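
The validity check above clears bit 0 of the decoded INSTANCEIDLO value with GENMASK(31, 1) before comparing it against mmSMNAID_AID0_MCA_SMU. A small stand-alone illustration of what that mask evaluates to is below, using a simplified 32-bit stand-in for the kernel's GENMASK() macro and a made-up sample value.

/* Simplified 32-bit stand-in for the kernel's GENMASK(); illustration only. */
#include <stdio.h>
#include <stdint.h>

#define GENMASK32(h, l) ((~0u >> (31 - (h))) & (~0u << (l)))

int main(void)
{
	uint32_t instlo = 0x01234567;   /* hypothetical decoded INSTANCEIDLO */

	/* GENMASK(31, 1) == 0xFFFFFFFE: keep bits 31..1, drop bit 0 */
	printf("mask   = 0x%08X\n", GENMASK32(31, 1));
	printf("instlo = 0x%08X -> 0x%08X\n", instlo, instlo & GENMASK32(31, 1));
	return 0;
}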
static const struct aca_bank_ops vcn_v5_0_1_aca_bank_ops = {
.aca_bank_parser = vcn_v5_0_1_aca_bank_parser,
.aca_bank_is_valid = vcn_v5_0_1_aca_bank_is_valid,
};
static const struct aca_info vcn_v5_0_1_aca_info = {
.hwip = ACA_HWIP_TYPE_SMU,
.mask = ACA_ERROR_UE_MASK,
.bank_ops = &vcn_v5_0_1_aca_bank_ops,
};
static int vcn_v5_0_1_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block)
{
int r;
r = amdgpu_ras_block_late_init(adev, ras_block);
if (r)
return r;
r = amdgpu_ras_bind_aca(adev, AMDGPU_RAS_BLOCK__VCN,
&vcn_v5_0_1_aca_info, NULL);
if (r)
goto late_fini;
return 0;
late_fini:
amdgpu_ras_block_late_fini(adev, ras_block);
return r;
}
static struct amdgpu_vcn_ras vcn_v5_0_1_ras = {
.ras_block = {
.hw_ops = &vcn_v5_0_1_ras_hw_ops,
.ras_late_init = vcn_v5_0_1_ras_late_init,
},
};
static void vcn_v5_0_1_set_ras_funcs(struct amdgpu_device *adev)
{
adev->vcn.ras = &vcn_v5_0_1_ras;
}


@ -27,6 +27,13 @@
#define regVCN_RRMT_CNTL 0x0940
#define regVCN_RRMT_CNTL_BASE_IDX 1
enum amdgpu_vcn_v5_0_1_sub_block {
AMDGPU_VCN_V5_0_1_VCPU_VCODEC = 0,
AMDGPU_VCN_V5_0_1_MAX_SUB_BLOCK,
};
extern const struct amdgpu_ip_block_version vcn_v5_0_1_ip_block;
#endif /* __VCN_v5_0_1_H__ */


@ -5,7 +5,7 @@
config HSA_AMD
bool "HSA kernel driver for AMD GPU devices"
depends on DRM_AMDGPU && (X86_64 || ARM64 || PPC64)
depends on DRM_AMDGPU && (X86_64 || ARM64 || PPC64 || (RISCV && 64BIT))
select HMM_MIRROR
select MMU_NOTIFIER
select DRM_AMDGPU_USERPTR


@ -91,7 +91,6 @@ static void cik_event_interrupt_wq(struct kfd_node *dev,
const struct cik_ih_ring_entry *ihre =
(const struct cik_ih_ring_entry *)ih_ring_entry;
uint32_t context_id = ihre->data & 0xfffffff;
unsigned int vmid = (ihre->ring_id & 0x0000ff00) >> 8;
u32 pasid = (ihre->ring_id & 0xffff0000) >> 16;
if (pasid == 0)
@ -125,11 +124,7 @@ static void cik_event_interrupt_wq(struct kfd_node *dev,
return;
}
if (info.vmid == vmid)
kfd_signal_vm_fault_event(pdd, &info, NULL);
else
kfd_signal_vm_fault_event(pdd, &info, NULL);
kfd_signal_vm_fault_event(pdd, &info, NULL);
kfd_unref_process(p);
}
}


@ -2039,9 +2039,7 @@ static int criu_get_process_object_info(struct kfd_process *p,
num_events = kfd_get_num_events(p);
ret = svm_range_get_info(p, &num_svm_ranges, &svm_priv_data_size);
if (ret)
return ret;
svm_range_get_info(p, &num_svm_ranges, &svm_priv_data_size);
*num_objects = num_queues + num_events + num_svm_ranges;


@ -1350,6 +1350,7 @@ void kfd_signal_poison_consumed_event(struct kfd_node *dev, u32 pasid)
user_gpu_id = kfd_process_get_user_gpu_id(p, dev->id);
if (unlikely(user_gpu_id == -EINVAL)) {
WARN_ONCE(1, "Could not get user_gpu_id from dev->id:%x\n", dev->id);
kfd_unref_process(p);
return;
}


@ -279,20 +279,17 @@ static int init_user_queue(struct process_queue_manager *pqm,
/* Starting with GFX11, wptr BOs must be mapped to GART for MES to determine work
* on unmapped queues for usermode queue oversubscription (no aggregated doorbell)
*/
if (((dev->adev->mes.sched_version & AMDGPU_MES_API_VERSION_MASK)
>> AMDGPU_MES_API_VERSION_SHIFT) >= 2) {
if (dev->adev != amdgpu_ttm_adev(q_properties->wptr_bo->tbo.bdev)) {
pr_err("Queue memory allocated to wrong device\n");
retval = -EINVAL;
goto free_gang_ctx_bo;
}
if (dev->adev != amdgpu_ttm_adev(q_properties->wptr_bo->tbo.bdev)) {
pr_err("Queue memory allocated to wrong device\n");
retval = -EINVAL;
goto free_gang_ctx_bo;
}
retval = amdgpu_amdkfd_map_gtt_bo_to_gart(q_properties->wptr_bo,
&(*q)->wptr_bo_gart);
if (retval) {
pr_err("Failed to map wptr bo to GART\n");
goto free_gang_ctx_bo;
}
retval = amdgpu_amdkfd_map_gtt_bo_to_gart(q_properties->wptr_bo,
&(*q)->wptr_bo_gart);
if (retval) {
pr_err("Failed to map wptr bo to GART\n");
goto free_gang_ctx_bo;
}
}


@ -4075,8 +4075,8 @@ exit:
return ret;
}
int svm_range_get_info(struct kfd_process *p, uint32_t *num_svm_ranges,
uint64_t *svm_priv_data_size)
void svm_range_get_info(struct kfd_process *p, uint32_t *num_svm_ranges,
uint64_t *svm_priv_data_size)
{
uint64_t total_size, accessibility_size, common_attr_size;
int nattr_common = 4, nattr_accessibility = 1;
@ -4088,8 +4088,6 @@ int svm_range_get_info(struct kfd_process *p, uint32_t *num_svm_ranges,
*svm_priv_data_size = 0;
svms = &p->svms;
if (!svms)
return -EINVAL;
mutex_lock(&svms->lock);
list_for_each_entry(prange, &svms->list, list) {
@ -4131,7 +4129,6 @@ int svm_range_get_info(struct kfd_process *p, uint32_t *num_svm_ranges,
pr_debug("num_svm_ranges %u total_priv_size %llu\n", *num_svm_ranges,
*svm_priv_data_size);
return 0;
}
int kfd_criu_checkpoint_svm(struct kfd_process *p,
@ -4148,8 +4145,6 @@ int kfd_criu_checkpoint_svm(struct kfd_process *p,
struct mm_struct *mm;
svms = &p->svms;
if (!svms)
return -EINVAL;
mm = get_task_mm(p->lead_thread);
if (!mm) {


@ -184,8 +184,8 @@ void schedule_deferred_list_work(struct svm_range_list *svms);
void svm_range_dma_unmap_dev(struct device *dev, dma_addr_t *dma_addr,
unsigned long offset, unsigned long npages);
void svm_range_dma_unmap(struct svm_range *prange);
int svm_range_get_info(struct kfd_process *p, uint32_t *num_svm_ranges,
uint64_t *svm_priv_data_size);
void svm_range_get_info(struct kfd_process *p, uint32_t *num_svm_ranges,
uint64_t *svm_priv_data_size);
int kfd_criu_checkpoint_svm(struct kfd_process *p,
uint8_t __user *user_priv_data,
uint64_t *priv_offset);
@ -237,13 +237,12 @@ static inline int svm_range_schedule_evict_svm_bo(
return -EINVAL;
}
static inline int svm_range_get_info(struct kfd_process *p,
uint32_t *num_svm_ranges,
uint64_t *svm_priv_data_size)
static inline void svm_range_get_info(struct kfd_process *p,
uint32_t *num_svm_ranges,
uint64_t *svm_priv_data_size)
{
*num_svm_ranges = 0;
*svm_priv_data_size = 0;
return 0;
}
static inline int kfd_criu_checkpoint_svm(struct kfd_process *p,


@ -676,21 +676,15 @@ static void dm_crtc_high_irq(void *interrupt_params)
spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
if (acrtc->dm_irq_params.stream &&
acrtc->dm_irq_params.vrr_params.supported) {
bool replay_en = acrtc->dm_irq_params.stream->link->replay_settings.replay_feature_enabled;
bool psr_en = acrtc->dm_irq_params.stream->link->psr_settings.psr_feature_enabled;
bool fs_active_var_en = acrtc->dm_irq_params.freesync_config.state == VRR_STATE_ACTIVE_VARIABLE;
acrtc->dm_irq_params.vrr_params.supported &&
acrtc->dm_irq_params.freesync_config.state ==
VRR_STATE_ACTIVE_VARIABLE) {
mod_freesync_handle_v_update(adev->dm.freesync_module,
acrtc->dm_irq_params.stream,
&acrtc->dm_irq_params.vrr_params);
/* update vmin_vmax only if freesync is enabled, or only if PSR and REPLAY are disabled */
if (fs_active_var_en || (!fs_active_var_en && !replay_en && !psr_en)) {
dc_stream_adjust_vmin_vmax(adev->dm.dc,
acrtc->dm_irq_params.stream,
&acrtc->dm_irq_params.vrr_params.adjust);
}
dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
&acrtc->dm_irq_params.vrr_params.adjust);
}
/*
@ -2006,8 +2000,10 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
if (amdgpu_dc_debug_mask & DC_FORCE_SUBVP_MCLK_SWITCH)
adev->dm.dc->debug.force_subvp_mclk_switch = true;
if (amdgpu_dc_debug_mask & DC_DISABLE_SUBVP)
if (amdgpu_dc_debug_mask & DC_DISABLE_SUBVP_FAMS) {
adev->dm.dc->debug.force_disable_subvp = true;
adev->dm.dc->debug.fams2_config.bits.enable = false;
}
if (amdgpu_dc_debug_mask & DC_ENABLE_DML2) {
adev->dm.dc->debug.using_dml2 = true;
@ -2020,6 +2016,9 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
if (amdgpu_dc_debug_mask & DC_HDCP_LC_ENABLE_SW_FALLBACK)
adev->dm.dc->debug.hdcp_lc_enable_sw_fallback = true;
if (amdgpu_dc_debug_mask & DC_SKIP_DETECTION_LT)
adev->dm.dc->debug.skip_detection_link_training = true;
adev->dm.dc->debug.visual_confirm = amdgpu_dc_visual_confirm;
/* TODO: Remove after DP2 receiver gets proper support of Cable ID feature */
@ -4911,6 +4910,7 @@ amdgpu_dm_register_backlight_device(struct amdgpu_dm_connector *aconnector)
struct backlight_properties props = { 0 };
struct amdgpu_dm_backlight_caps caps = { 0 };
char bl_name[16];
int min, max;
if (aconnector->bl_idx == -1)
return;
@ -4923,11 +4923,15 @@ amdgpu_dm_register_backlight_device(struct amdgpu_dm_connector *aconnector)
}
amdgpu_acpi_get_backlight_caps(&caps);
if (caps.caps_valid) {
if (caps.caps_valid && get_brightness_range(&caps, &min, &max)) {
if (power_supply_is_system_supplied() > 0)
props.brightness = caps.ac_level;
props.brightness = (max - min) * DIV_ROUND_CLOSEST(caps.ac_level, 100);
else
props.brightness = caps.dc_level;
props.brightness = (max - min) * DIV_ROUND_CLOSEST(caps.dc_level, 100);
/* min is zero, so max needs to be adjusted */
props.max_brightness = max - min;
drm_dbg(drm, "Backlight caps: min: %d, max: %d, ac %d, dc %d\n", min, max,
caps.ac_level, caps.dc_level);
} else
props.brightness = AMDGPU_MAX_BL_LEVEL;


@ -246,8 +246,6 @@ static void amdgpu_dm_crtc_vblank_control_worker(struct work_struct *work)
struct vblank_control_work *vblank_work =
container_of(work, struct vblank_control_work, work);
struct amdgpu_display_manager *dm = vblank_work->dm;
struct amdgpu_device *adev = drm_to_adev(dm->ddev);
int r;
mutex_lock(&dm->dc_lock);
@ -275,15 +273,8 @@ static void amdgpu_dm_crtc_vblank_control_worker(struct work_struct *work)
vblank_work->acrtc->dm_irq_params.allow_sr_entry);
}
if (dm->active_vblank_irq_count == 0) {
r = amdgpu_dpm_pause_power_profile(adev, true);
if (r)
dev_warn(adev->dev, "failed to set default power profile mode\n");
if (dm->active_vblank_irq_count == 0)
dc_allow_idle_optimizations(dm->dc, true);
r = amdgpu_dpm_pause_power_profile(adev, false);
if (r)
dev_warn(adev->dev, "failed to restore the power profile mode\n");
}
mutex_unlock(&dm->dc_lock);


@ -1393,7 +1393,7 @@ static void calculate_bandwidth(
if ((bw_mtn(data->dram_speed_change_margin, bw_int_to_fixed(0)) && bw_ltn(data->dram_speed_change_margin, bw_int_to_fixed(9999)))) {
/*determine the minimum dram clock change margin for each set of clock frequencies*/
data->min_dram_speed_change_margin[i][j] = bw_min2(data->min_dram_speed_change_margin[i][j], data->dram_speed_change_margin);
/*compute the maximum clock frequuency required for the dram clock change at each set of clock frequencies*/
/*compute the maximum clock frequency required for the dram clock change at each set of clock frequencies*/
data->dispclk_required_for_dram_speed_change_pipe[i][j] = bw_max2(bw_div(bw_div(bw_mul(data->src_pixels_for_first_output_pixel[k], dceip->display_pipe_throughput_factor), dceip->lb_write_pixels_per_dispclk), (bw_sub(bw_sub(bw_sub(data->maximum_latency_hiding_with_cursor[k], vbios->nbp_state_change_latency), data->dmif_burst_time[i][j]), data->dram_speed_change_line_source_transfer_time[k][i][j]))), bw_div(bw_div(bw_mul(data->src_pixels_for_last_output_pixel[k], dceip->display_pipe_throughput_factor), dceip->lb_write_pixels_per_dispclk), (bw_add(bw_sub(bw_sub(bw_sub(data->maximum_latency_hiding_with_cursor[k], vbios->nbp_state_change_latency), data->dmif_burst_time[i][j]), data->dram_speed_change_line_source_transfer_time[k][i][j]), data->active_time[k]))));
if ((bw_ltn(data->dispclk_required_for_dram_speed_change_pipe[i][j], vbios->high_voltage_max_dispclk))) {
data->display_pstate_change_enable[k] = 1;
@ -1407,7 +1407,7 @@ static void calculate_bandwidth(
if ((bw_mtn(data->dram_speed_change_margin, bw_int_to_fixed(0)) && bw_ltn(data->dram_speed_change_margin, bw_int_to_fixed(9999)))) {
/*determine the minimum dram clock change margin for each display pipe*/
data->min_dram_speed_change_margin[i][j] = bw_min2(data->min_dram_speed_change_margin[i][j], data->dram_speed_change_margin);
/*compute the maximum clock frequuency required for the dram clock change at each set of clock frequencies*/
/*compute the maximum clock frequency required for the dram clock change at each set of clock frequencies*/
data->dispclk_required_for_dram_speed_change_pipe[i][j] = bw_max2(bw_div(bw_div(bw_mul(data->src_pixels_for_first_output_pixel[k], dceip->display_pipe_throughput_factor), dceip->lb_write_pixels_per_dispclk), (bw_sub(bw_sub(bw_sub(bw_sub(data->maximum_latency_hiding_with_cursor[k], vbios->nbp_state_change_latency), data->dmif_burst_time[i][j]), data->dram_speed_change_line_source_transfer_time[k][i][j]), data->mcifwr_burst_time[i][j]))), bw_div(bw_div(bw_mul(data->src_pixels_for_last_output_pixel[k], dceip->display_pipe_throughput_factor), dceip->lb_write_pixels_per_dispclk), (bw_add(bw_sub(bw_sub(bw_sub(bw_sub(data->maximum_latency_hiding_with_cursor[k], vbios->nbp_state_change_latency), data->dmif_burst_time[i][j]), data->dram_speed_change_line_source_transfer_time[k][i][j]), data->mcifwr_burst_time[i][j]), data->active_time[k]))));
if ((bw_ltn(data->dispclk_required_for_dram_speed_change_pipe[i][j], vbios->high_voltage_max_dispclk))) {
data->display_pstate_change_enable[k] = 1;


@ -62,7 +62,7 @@ static void dal_hw_hpd_destroy(
*ptr = NULL;
}
static enum gpio_result get_value(
static enum gpio_result dal_hw_hpd_get_value(
const struct hw_gpio_pin *ptr,
uint32_t *value)
{
@ -85,7 +85,7 @@ static enum gpio_result get_value(
return dal_hw_gpio_get_value(ptr, value);
}
static enum gpio_result set_config(
static enum gpio_result dal_hw_hpd_set_config(
struct hw_gpio_pin *ptr,
const struct gpio_config_data *config_data)
{
@ -104,9 +104,9 @@ static enum gpio_result set_config(
static const struct hw_gpio_pin_funcs funcs = {
.destroy = dal_hw_hpd_destroy,
.open = dal_hw_gpio_open,
.get_value = get_value,
.get_value = dal_hw_hpd_get_value,
.set_value = dal_hw_gpio_set_value,
.set_config = set_config,
.set_config = dal_hw_hpd_set_config,
.change_mode = dal_hw_gpio_change_mode,
.close = dal_hw_gpio_close,
};


@ -952,8 +952,8 @@ void dce110_edp_backlight_control(
struct dc_context *ctx = link->ctx;
struct bp_transmitter_control cntl = { 0 };
uint8_t pwrseq_instance = 0;
unsigned int pre_T11_delay = OLED_PRE_T11_DELAY;
unsigned int post_T7_delay = OLED_POST_T7_DELAY;
unsigned int pre_T11_delay = (link->dpcd_sink_ext_caps.bits.oled ? OLED_PRE_T11_DELAY : 0);
unsigned int post_T7_delay = (link->dpcd_sink_ext_caps.bits.oled ? OLED_POST_T7_DELAY : 0);
if (dal_graphics_object_id_get_connector_id(link->link_enc->connector)
!= CONNECTOR_ID_EDP) {
@ -1069,7 +1069,8 @@ void dce110_edp_backlight_control(
if (!enable) {
/*follow oem panel config's requirement*/
pre_T11_delay += link->panel_config.pps.extra_pre_t11_ms;
msleep(pre_T11_delay);
if (pre_T11_delay)
msleep(pre_T11_delay);
}
}
@ -1220,6 +1221,9 @@ void dce110_blank_stream(struct pipe_ctx *pipe_ctx)
struct dc_link *link = stream->link;
struct dce_hwseq *hws = link->dc->hwseq;
if (hws && hws->wa_state.skip_blank_stream)
return;
if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP) {
if (!link->skip_implict_edp_power_control)
hws->funcs.edp_backlight_control(link, false);


@ -76,6 +76,7 @@ void dcn20_log_color_state(struct dc *dc,
{
struct dc_context *dc_ctx = dc->ctx;
struct resource_pool *pool = dc->res_pool;
bool is_gamut_remap_available = false;
int i;
DTN_INFO("DPP: DGAM mode SHAPER mode 3DLUT mode 3DLUT bit depth"
@ -89,15 +90,15 @@ void dcn20_log_color_state(struct dc *dc,
struct dcn_dpp_state s = {0};
dpp->funcs->dpp_read_state(dpp, &s);
dpp->funcs->dpp_get_gamut_remap(dpp, &s.gamut_remap);
if (dpp->funcs->dpp_get_gamut_remap) {
dpp->funcs->dpp_get_gamut_remap(dpp, &s.gamut_remap);
is_gamut_remap_available = true;
}
if (!s.is_enabled)
continue;
DTN_INFO("[%2d]: %8s %11s %10s %15s %10s %9s %12s "
"%010lld %010lld %010lld %010lld "
"%010lld %010lld %010lld %010lld "
"%010lld %010lld %010lld %010lld",
DTN_INFO("[%2d]: %8s %11s %10s %15s %10s %9s",
dpp->inst,
(s.dgam_lut_mode == 0) ? "Bypass" :
((s.dgam_lut_mode == 1) ? "sRGB" :
@ -114,10 +115,17 @@ void dcn20_log_color_state(struct dc *dc,
(s.lut3d_bit_depth <= 0) ? "12-bit" : "10-bit",
(s.lut3d_size == 0) ? "17x17x17" : "9x9x9",
(s.rgam_lut_mode == 1) ? "RAM A" :
((s.rgam_lut_mode == 1) ? "RAM B" : "Bypass"),
((s.rgam_lut_mode == 1) ? "RAM B" : "Bypass"));
if (is_gamut_remap_available) {
DTN_INFO(" %12s "
"%010lld %010lld %010lld %010lld "
"%010lld %010lld %010lld %010lld "
"%010lld %010lld %010lld %010lld",
(s.gamut_remap.gamut_adjust_type == 0) ? "Bypass" :
((s.gamut_remap.gamut_adjust_type == 1) ? "HW" :
"SW"),
((s.gamut_remap.gamut_adjust_type == 1) ? "HW" :
"SW"),
s.gamut_remap.temperature_matrix[0].value,
s.gamut_remap.temperature_matrix[1].value,
s.gamut_remap.temperature_matrix[2].value,
@ -130,6 +138,8 @@ void dcn20_log_color_state(struct dc *dc,
s.gamut_remap.temperature_matrix[9].value,
s.gamut_remap.temperature_matrix[10].value,
s.gamut_remap.temperature_matrix[11].value);
}
DTN_INFO("\n");
}
DTN_INFO("\n");
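
These logging hunks (and the matching dcn30 ones below) stop dereferencing dpp_get_gamut_remap unconditionally: the hook is called only when the DPP implementation provides it, and the gamut-remap columns are printed only when it was actually read. A minimal sketch of that optional-callback guard follows; the struct and function names in it are illustrative, not the DC types.

/* Minimal sketch of the optional-hook guard; names are placeholders. */
#include <stdio.h>
#include <stddef.h>

struct remap_state { int type; };

struct dpp_funcs {
	void (*get_gamut_remap)(struct remap_state *out);  /* may be NULL */
};

static void log_color_state(const struct dpp_funcs *funcs)
{
	struct remap_state s = { 0 };
	int have_remap = 0;

	if (funcs->get_gamut_remap) {
		funcs->get_gamut_remap(&s);
		have_remap = 1;
	}

	printf("base state ...");
	if (have_remap)
		printf(" remap type %d", s.type);
	printf("\n");
}

static void fill_remap(struct remap_state *out) { out->type = 1; }

int main(void)
{
	struct dpp_funcs with = { .get_gamut_remap = fill_remap };
	struct dpp_funcs without = { .get_gamut_remap = NULL };

	log_color_state(&with);
	log_color_state(&without);
	return 0;
}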


@ -74,6 +74,7 @@ void dcn30_log_color_state(struct dc *dc,
{
struct dc_context *dc_ctx = dc->ctx;
struct resource_pool *pool = dc->res_pool;
bool is_gamut_remap_available = false;
int i;
DTN_INFO("DPP: DGAM ROM DGAM ROM type DGAM LUT SHAPER mode"
@ -88,16 +89,16 @@ void dcn30_log_color_state(struct dc *dc,
struct dcn_dpp_state s = {0};
dpp->funcs->dpp_read_state(dpp, &s);
dpp->funcs->dpp_get_gamut_remap(dpp, &s.gamut_remap);
if (dpp->funcs->dpp_get_gamut_remap) {
dpp->funcs->dpp_get_gamut_remap(dpp, &s.gamut_remap);
is_gamut_remap_available = true;
}
if (!s.is_enabled)
continue;
DTN_INFO("[%2d]: %7x %13s %8s %11s %10s %15s %10s %9s"
" %12s "
"%010lld %010lld %010lld %010lld "
"%010lld %010lld %010lld %010lld "
"%010lld %010lld %010lld %010lld",
DTN_INFO("[%2d]: %7x %13s %8s %11s %10s %15s %10s %9s",
dpp->inst,
s.pre_dgam_mode,
(s.pre_dgam_select == 0) ? "sRGB" :
@ -121,7 +122,14 @@ void dcn30_log_color_state(struct dc *dc,
(s.lut3d_size == 0) ? "17x17x17" : "9x9x9",
(s.rgam_lut_mode == 0) ? "Bypass" :
((s.rgam_lut_mode == 1) ? "RAM A" :
"RAM B"),
"RAM B"));
if (is_gamut_remap_available) {
DTN_INFO(" %12s "
"%010lld %010lld %010lld %010lld "
"%010lld %010lld %010lld %010lld "
"%010lld %010lld %010lld %010lld",
(s.gamut_remap.gamut_adjust_type == 0) ? "Bypass" :
((s.gamut_remap.gamut_adjust_type == 1) ? "HW" :
"SW"),
@ -137,6 +145,8 @@ void dcn30_log_color_state(struct dc *dc,
s.gamut_remap.temperature_matrix[9].value,
s.gamut_remap.temperature_matrix[10].value,
s.gamut_remap.temperature_matrix[11].value);
}
DTN_INFO("\n");
}
DTN_INFO("\n");


@ -526,9 +526,15 @@ static void dcn31_reset_back_end_for_pipe(
link = pipe_ctx->stream->link;
if (dc->hwseq)
dc->hwseq->wa_state.skip_blank_stream = false;
if ((!pipe_ctx->stream->dpms_off || link->link_status.link_active) &&
(link->connector_signal == SIGNAL_TYPE_EDP))
(link->connector_signal == SIGNAL_TYPE_EDP)) {
dc->hwss.blank_stream(pipe_ctx);
if (dc->hwseq)
dc->hwseq->wa_state.skip_blank_stream = true;
}
pipe_ctx->stream_res.tg->funcs->set_dsc_config(
pipe_ctx->stream_res.tg,
@ -570,7 +576,8 @@ static void dcn31_reset_back_end_for_pipe(
pipe_ctx->stream_res.audio = NULL;
}
}
if (dc->hwseq)
dc->hwseq->wa_state.skip_blank_stream = false;
pipe_ctx->stream = NULL;
DC_LOG_DEBUG("Reset back end for pipe %d, tg:%d\n",
pipe_ctx->pipe_idx, pipe_ctx->stream_res.tg->inst);
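
Taken together with the dce110_blank_stream() early return added above, this hunk blanks the eDP stream once from the reset path, latches wa_state.skip_blank_stream so the teardown that follows does not blank it a second time, and clears the flag when the back end has been reset. A minimal sketch of that latch-and-skip pattern is below, with placeholder names rather than the DC ones.

/* Sketch of the skip_blank_stream latch; illustrative only. */
#include <stdio.h>
#include <stdbool.h>

struct wa_state { bool skip_blank_stream; };

static struct wa_state wa;

static void blank_stream(void)
{
	if (wa.skip_blank_stream)
		return;                 /* already blanked by the reset path */
	printf("blanking stream\n");
}

static void reset_back_end(void)
{
	wa.skip_blank_stream = false;

	blank_stream();                 /* explicit blank before tearing down */
	wa.skip_blank_stream = true;

	/* ... back-end teardown that may call blank_stream() again ... */
	blank_stream();                 /* now a no-op */

	wa.skip_blank_stream = false;   /* restore for normal operation */
}

int main(void)
{
	reset_back_end();
	return 0;
}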


@ -49,6 +49,7 @@ struct hwseq_wa_state {
bool DEGVIDCN10_253_applied;
bool disallow_self_refresh_during_multi_plane_transition_applied;
unsigned int disallow_self_refresh_during_multi_plane_transition_applied_on_frame;
bool skip_blank_stream;
};
struct pipe_ctx;


@ -2023,7 +2023,7 @@ static bool retrieve_link_cap(struct dc_link *link)
/* Read DP tunneling information. */
status = dpcd_get_tunneling_device_data(link);
if (status != DC_OK)
dm_error("%s: Read DP tunneling device data failed.\n", __func__);
DC_LOG_DP2("%s: Read DP tunneling device data failed.\n", __func__);
retrieve_cable_id(link);
dpcd_write_cable_id_to_dprx(link);


@ -502,7 +502,7 @@ void optc2_get_last_used_drr_vtotal(struct timing_generator *optc, uint32_t *ref
REG_GET(OTG_DRR_CONTROL, OTG_V_TOTAL_LAST_USED_BY_DRR, refresh_rate);
}
static struct timing_generator_funcs dcn20_tg_funcs = {
static const struct timing_generator_funcs dcn20_tg_funcs = {
.validate_timing = optc1_validate_timing,
.program_timing = optc1_program_timing,
.setup_vertical_interrupt0 = optc1_setup_vertical_interrupt0,


@ -129,7 +129,7 @@ static void optc201_get_optc_source(struct timing_generator *optc,
*num_of_src_opp = 1;
}
static struct timing_generator_funcs dcn201_tg_funcs = {
static const struct timing_generator_funcs dcn201_tg_funcs = {
.validate_timing = optc201_validate_timing,
.program_timing = optc1_program_timing,
.setup_vertical_interrupt0 = optc1_setup_vertical_interrupt0,


@ -357,7 +357,7 @@ void optc3_tg_init(struct timing_generator *optc)
optc1_clear_optc_underflow(optc);
}
static struct timing_generator_funcs dcn30_tg_funcs = {
static const struct timing_generator_funcs dcn30_tg_funcs = {
.validate_timing = optc1_validate_timing,
.program_timing = optc1_program_timing,
.setup_vertical_interrupt0 = optc1_setup_vertical_interrupt0,


@ -109,7 +109,7 @@ void optc301_setup_manual_trigger(struct timing_generator *optc)
OTG_TRIGA_CLEAR, 1);
}
static struct timing_generator_funcs dcn30_tg_funcs = {
static const struct timing_generator_funcs dcn30_tg_funcs = {
.validate_timing = optc1_validate_timing,
.program_timing = optc1_program_timing,
.setup_vertical_interrupt0 = optc1_setup_vertical_interrupt0,


@ -315,7 +315,7 @@ void optc31_read_otg_state(struct timing_generator *optc,
s->otg_double_buffer_control = REG_READ(OTG_DOUBLE_BUFFER_CONTROL);
}
static struct timing_generator_funcs dcn31_tg_funcs = {
static const struct timing_generator_funcs dcn31_tg_funcs = {
.validate_timing = optc1_validate_timing,
.program_timing = optc1_program_timing,
.setup_vertical_interrupt0 = optc1_setup_vertical_interrupt0,


@ -192,7 +192,7 @@ static void optc314_set_h_timing_div_manual_mode(struct timing_generator *optc,
}
static struct timing_generator_funcs dcn314_tg_funcs = {
static const struct timing_generator_funcs dcn314_tg_funcs = {
.validate_timing = optc1_validate_timing,
.program_timing = optc1_program_timing,
.setup_vertical_interrupt0 = optc1_setup_vertical_interrupt0,


@ -297,7 +297,7 @@ static void optc32_set_drr(
optc32_setup_manual_trigger(optc);
}
static struct timing_generator_funcs dcn32_tg_funcs = {
static const struct timing_generator_funcs dcn32_tg_funcs = {
.validate_timing = optc1_validate_timing,
.program_timing = optc1_program_timing,
.setup_vertical_interrupt0 = optc1_setup_vertical_interrupt0,


@ -428,7 +428,7 @@ static void optc35_set_long_vtotal(
}
}
static struct timing_generator_funcs dcn35_tg_funcs = {
static const struct timing_generator_funcs dcn35_tg_funcs = {
.validate_timing = optc1_validate_timing,
.program_timing = optc1_program_timing,
.setup_vertical_interrupt0 = optc1_setup_vertical_interrupt0,


@ -459,7 +459,7 @@ bool optc401_wait_update_lock_status(struct timing_generator *tg, bool locked)
return true;
}
static struct timing_generator_funcs dcn401_tg_funcs = {
static const struct timing_generator_funcs dcn401_tg_funcs = {
.validate_timing = optc1_validate_timing,
.program_timing = optc1_program_timing,
.setup_vertical_interrupt0 = optc1_setup_vertical_interrupt0,


@ -1938,8 +1938,8 @@ static bool dcn401_resource_construct(
dc->caps.color.dpp.gamma_corr = 1;
dc->caps.color.dpp.dgam_rom_for_yuv = 0;
dc->caps.color.dpp.hw_3d_lut = 1;
dc->caps.color.dpp.ogam_ram = 1;
dc->caps.color.dpp.hw_3d_lut = 0;
dc->caps.color.dpp.ogam_ram = 0;
// no OGAM ROM on DCN2 and later ASICs
dc->caps.color.dpp.ogam_rom_caps.srgb = 0;
dc->caps.color.dpp.ogam_rom_caps.bt2020 = 0;


@ -2139,11 +2139,6 @@ union dmub_cmd_fams2_config {
} stream_v1; //v1
};
struct dmub_fams2_config_v2 {
struct dmub_cmd_fams2_global_config global;
struct dmub_fams2_stream_static_state_v1 stream_v1[DMUB_MAX_STREAMS]; //v1
};
/**
* DMUB rb command definition for FAMS2 (merged SubVP, FPO, Legacy)
*/
@ -2152,22 +2147,6 @@ struct dmub_rb_cmd_fams2 {
union dmub_cmd_fams2_config config;
};
/**
* Indirect buffer descriptor
*/
struct dmub_ib_data {
union dmub_addr src; // location of indirect buffer in memory
uint16_t size; // indirect buffer size in bytes
};
/**
* DMUB rb command definition for commands passed over indirect buffer
*/
struct dmub_rb_cmd_ib {
struct dmub_cmd_header header;
struct dmub_ib_data ib_data;
};
/**
* enum dmub_cmd_idle_opt_type - Idle optimization command type.
*/
@ -2191,11 +2170,6 @@ enum dmub_cmd_idle_opt_type {
* DCN hardware notify power state.
*/
DMUB_CMD__IDLE_OPT_SET_DC_POWER_STATE = 3,
/**
* DCN notify to release HW.
*/
DMUB_CMD__IDLE_OPT_RELEASE_HW = 4,
};
/**
@ -2957,9 +2931,8 @@ enum dmub_cmd_fams_type {
*/
DMUB_CMD__FAMS_SET_MANUAL_TRIGGER = 3,
DMUB_CMD__FAMS2_CONFIG = 4,
DMUB_CMD__FAMS2_IB_CONFIG = 5,
DMUB_CMD__FAMS2_DRR_UPDATE = 6,
DMUB_CMD__FAMS2_FLIP = 7,
DMUB_CMD__FAMS2_DRR_UPDATE = 5,
DMUB_CMD__FAMS2_FLIP = 6,
};
/**
@ -5953,11 +5926,8 @@ union dmub_rb_cmd {
* Definition of a DMUB_CMD__PSP_ASSR_ENABLE command.
*/
struct dmub_rb_cmd_assr_enable assr_enable;
struct dmub_rb_cmd_fams2 fams2_config;
struct dmub_rb_cmd_ib ib_fams2_config;
struct dmub_rb_cmd_fams2_drr_update fams2_drr_update;
struct dmub_rb_cmd_fams2_flip fams2_flip;


@ -424,7 +424,7 @@ struct integrated_info {
/*
* DFS-bypass flag
*/
/* Copy of SYS_INFO_GPUCAPS__ENABEL_DFS_BYPASS from atombios.h */
/* Copy of SYS_INFO_GPUCAPS__ENABLE_DFS_BYPASS from atombios.h */
enum {
DFS_BYPASS_ENABLE = 0x10
};


@ -368,6 +368,9 @@ enum mod_hdcp_status mod_hdcp_hdcp1_enable_encryption(struct mod_hdcp *hdcp)
struct mod_hdcp_display *display = get_first_active_display(hdcp);
enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
if (!display)
return MOD_HDCP_STATUS_DISPLAY_NOT_FOUND;
mutex_lock(&psp->hdcp_context.mutex);
hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.context.mem_context.shared_buf;
memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));


@ -351,9 +351,10 @@ enum DC_DEBUG_MASK {
DC_DISABLE_HDMI_CEC = 0x10000,
/**
* @DC_DISABLE_SUBVP: If set, disable DCN Sub-Viewport feature in amdgpu driver.
* @DC_DISABLE_SUBVP_FAMS: If set, disable DCN Sub-Viewport & Firmware Assisted
* Memory Clock Switching (FAMS) feature in amdgpu driver.
*/
DC_DISABLE_SUBVP = 0x20000,
DC_DISABLE_SUBVP_FAMS = 0x20000,
/**
* @DC_DISABLE_CUSTOM_BRIGHTNESS_CURVE: If set, disable support for custom brightness curves
*/
@ -370,6 +371,11 @@ enum DC_DEBUG_MASK {
* path failure, retry using legacy SW path.
*/
DC_HDCP_LC_ENABLE_SW_FALLBACK = 0x100000,
/**
* @DC_SKIP_DETECTION_LT: If set, skip detection link training
*/
DC_SKIP_DETECTION_LT = 0x200000,
};
enum amd_dpm_forced_level;


@ -9776,6 +9776,14 @@
#define regDIG0_DIG_BE_CNTL_BASE_IDX 2
#define regDIG0_DIG_BE_EN_CNTL 0x20bd
#define regDIG0_DIG_BE_EN_CNTL_BASE_IDX 2
#define regDIG0_HDCP_INT_CONTROL 0x20c0
#define regDIG0_HDCP_INT_CONTROL_BASE_IDX 2
#define regDIG0_HDCP_LINK0_STATUS 0x20c1
#define regDIG0_HDCP_LINK0_STATUS_BASE_IDX 2
#define regDIG0_HDCP_I2C_CONTROL_0 0x20c2
#define regDIG0_HDCP_I2C_CONTROL_0_BASE_IDX 2
#define regDIG0_HDCP_I2C_CONTROL_1 0x20c3
#define regDIG0_HDCP_I2C_CONTROL_1_BASE_IDX 2
#define regDIG0_TMDS_CNTL 0x20e4
#define regDIG0_TMDS_CNTL_BASE_IDX 2
#define regDIG0_TMDS_CONTROL_CHAR 0x20e5
@ -10081,6 +10089,12 @@
#define regDIG1_DIG_BE_CNTL_BASE_IDX 2
#define regDIG1_DIG_BE_EN_CNTL 0x21e1
#define regDIG1_DIG_BE_EN_CNTL_BASE_IDX 2
#define regDIG1_HDCP_INT_CONTROL 0x21e4
#define regDIG1_HDCP_INT_CONTROL_BASE_IDX 2
#define regDIG1_HDCP_I2C_CONTROL_0 0x21e6
#define regDIG1_HDCP_I2C_CONTROL_0_BASE_IDX 2
#define regDIG1_HDCP_I2C_CONTROL_1 0x21e7
#define regDIG1_HDCP_I2C_CONTROL_1_BASE_IDX 2
#define regDIG1_TMDS_CNTL 0x2208
#define regDIG1_TMDS_CNTL_BASE_IDX 2
#define regDIG1_TMDS_CONTROL_CHAR 0x2209
@ -10386,6 +10400,12 @@
#define regDIG2_DIG_BE_CNTL_BASE_IDX 2
#define regDIG2_DIG_BE_EN_CNTL 0x2305
#define regDIG2_DIG_BE_EN_CNTL_BASE_IDX 2
#define regDIG2_HDCP_INT_CONTROL 0x2308
#define regDIG2_HDCP_INT_CONTROL_BASE_IDX 2
#define regDIG2_HDCP_I2C_CONTROL_0 0x230a
#define regDIG2_HDCP_I2C_CONTROL_0_BASE_IDX 2
#define regDIG2_HDCP_I2C_CONTROL_1 0x230b
#define regDIG2_HDCP_I2C_CONTROL_1_BASE_IDX 2
#define regDIG2_TMDS_CNTL 0x232c
#define regDIG2_TMDS_CNTL_BASE_IDX 2
#define regDIG2_TMDS_CONTROL_CHAR 0x232d
@ -10691,6 +10711,12 @@
#define regDIG3_DIG_BE_CNTL_BASE_IDX 2
#define regDIG3_DIG_BE_EN_CNTL 0x2429
#define regDIG3_DIG_BE_EN_CNTL_BASE_IDX 2
#define regDIG3_HDCP_INT_CONTROL 0x242c
#define regDIG3_HDCP_INT_CONTROL_BASE_IDX 2
#define regDIG3_HDCP_I2C_CONTROL_0 0x242e
#define regDIG3_HDCP_I2C_CONTROL_0_BASE_IDX 2
#define regDIG3_HDCP_I2C_CONTROL_1 0x242f
#define regDIG3_HDCP_I2C_CONTROL_1_BASE_IDX 2
#define regDIG3_TMDS_CNTL 0x2450
#define regDIG3_TMDS_CNTL_BASE_IDX 2
#define regDIG3_TMDS_CONTROL_CHAR 0x2451


@ -2847,6 +2847,14 @@
#define HDCP_INTERRUPT_DEST__DOUT_IHC_HDCP0_AUTH_FAIL_INTERRUPT_DEST__SHIFT 0x1
#define HDCP_INTERRUPT_DEST__DOUT_IHC_HDCP0_I2C_XFER_REQ_INTERRUPT_DEST__SHIFT 0x2
#define HDCP_INTERRUPT_DEST__DOUT_IHC_HDCP0_I2C_XFER_DONE_INTERRUPT_DEST__SHIFT 0x3
#define HDCP_INTERRUPT_DEST__DOUT_IHC_HDCP1_AUTH_SUCCESS_INTERRUPT_DEST__SHIFT 0x4
#define HDCP_INTERRUPT_DEST__DOUT_IHC_HDCP1_AUTH_FAIL_INTERRUPT_DEST__SHIFT 0x5
#define HDCP_INTERRUPT_DEST__DOUT_IHC_HDCP1_I2C_XFER_REQ_INTERRUPT_DEST__SHIFT 0x6
#define HDCP_INTERRUPT_DEST__DOUT_IHC_HDCP1_I2C_XFER_DONE_INTERRUPT_DEST__SHIFT 0x7
#define HDCP_INTERRUPT_DEST__DOUT_IHC_HDCP2_AUTH_SUCCESS_INTERRUPT_DEST__SHIFT 0x8
#define HDCP_INTERRUPT_DEST__DOUT_IHC_HDCP2_AUTH_FAIL_INTERRUPT_DEST__SHIFT 0x9
#define HDCP_INTERRUPT_DEST__DOUT_IHC_HDCP2_I2C_XFER_REQ_INTERRUPT_DEST__SHIFT 0xa
#define HDCP_INTERRUPT_DEST__DOUT_IHC_HDCP2_I2C_XFER_DONE_INTERRUPT_DEST__SHIFT 0xb
#define HDCP_INTERRUPT_DEST__DOUT_IHC_HDCP3_AUTH_SUCCESS_INTERRUPT_DEST__SHIFT 0xc
#define HDCP_INTERRUPT_DEST__DOUT_IHC_HDCP3_AUTH_FAIL_INTERRUPT_DEST__SHIFT 0xd
#define HDCP_INTERRUPT_DEST__DOUT_IHC_HDCP3_I2C_XFER_REQ_INTERRUPT_DEST__SHIFT 0xe
@ -2871,6 +2879,14 @@
#define HDCP_INTERRUPT_DEST__DOUT_IHC_HDCP0_AUTH_FAIL_INTERRUPT_DEST_MASK 0x00000002L
#define HDCP_INTERRUPT_DEST__DOUT_IHC_HDCP0_I2C_XFER_REQ_INTERRUPT_DEST_MASK 0x00000004L
#define HDCP_INTERRUPT_DEST__DOUT_IHC_HDCP0_I2C_XFER_DONE_INTERRUPT_DEST_MASK 0x00000008L
#define HDCP_INTERRUPT_DEST__DOUT_IHC_HDCP1_AUTH_SUCCESS_INTERRUPT_DEST_MASK 0x00000010L
#define HDCP_INTERRUPT_DEST__DOUT_IHC_HDCP1_AUTH_FAIL_INTERRUPT_DEST_MASK 0x00000020L
#define HDCP_INTERRUPT_DEST__DOUT_IHC_HDCP1_I2C_XFER_REQ_INTERRUPT_DEST_MASK 0x00000040L
#define HDCP_INTERRUPT_DEST__DOUT_IHC_HDCP1_I2C_XFER_DONE_INTERRUPT_DEST_MASK 0x00000080L
#define HDCP_INTERRUPT_DEST__DOUT_IHC_HDCP2_AUTH_SUCCESS_INTERRUPT_DEST_MASK 0x00000100L
#define HDCP_INTERRUPT_DEST__DOUT_IHC_HDCP2_AUTH_FAIL_INTERRUPT_DEST_MASK 0x00000200L
#define HDCP_INTERRUPT_DEST__DOUT_IHC_HDCP2_I2C_XFER_REQ_INTERRUPT_DEST_MASK 0x00000400L
#define HDCP_INTERRUPT_DEST__DOUT_IHC_HDCP2_I2C_XFER_DONE_INTERRUPT_DEST_MASK 0x00000800L
#define HDCP_INTERRUPT_DEST__DOUT_IHC_HDCP3_AUTH_SUCCESS_INTERRUPT_DEST_MASK 0x00001000L
#define HDCP_INTERRUPT_DEST__DOUT_IHC_HDCP3_AUTH_FAIL_INTERRUPT_DEST_MASK 0x00002000L
#define HDCP_INTERRUPT_DEST__DOUT_IHC_HDCP3_I2C_XFER_REQ_INTERRUPT_DEST_MASK 0x00004000L


@ -1067,7 +1067,13 @@
#define regVCN_FEATURES_BASE_IDX 1
#define regUVD_GPUIOV_STATUS 0x0055
#define regUVD_GPUIOV_STATUS_BASE_IDX 1
#define regUVD_RAS_VCPU_VCODEC_STATUS 0x0057
#define regUVD_RAS_VCPU_VCODEC_STATUS_BASE_IDX 1
#define regUVD_SCRATCH15 0x005c
#define regUVD_RAS_JPEG0_STATUS 0x0059
#define regUVD_RAS_JPEG0_STATUS_BASE_IDX 1
#define regUVD_RAS_JPEG1_STATUS 0x005a
#define regUVD_RAS_JPEG1_STATUS_BASE_IDX 1
#define regUVD_SCRATCH15_BASE_IDX 1
#define regUVD_VERSION 0x005d
#define regUVD_VERSION_BASE_IDX 1


@ -5714,6 +5714,22 @@
//UVD_GPUIOV_STATUS
#define UVD_GPUIOV_STATUS__UVD_GPUIOV_STATUS_VF_ENABLE__SHIFT 0x0
#define UVD_GPUIOV_STATUS__UVD_GPUIOV_STATUS_VF_ENABLE_MASK 0x00000001L
//UVD_RAS_VCPU_VCODEC_STATUS
#define UVD_RAS_VCPU_VCODEC_STATUS__POISONED_VF__SHIFT 0x0
#define UVD_RAS_VCPU_VCODEC_STATUS__POISONED_PF__SHIFT 0x1f
#define UVD_RAS_VCPU_VCODEC_STATUS__POISONED_VF_MASK 0x7FFFFFFFL
#define UVD_RAS_VCPU_VCODEC_STATUS__POISONED_PF_MASK 0x80000000L
//UVD_RAS_JPEG0_STATUS
#define UVD_RAS_JPEG0_STATUS__POISONED_VF__SHIFT 0x0
#define UVD_RAS_JPEG0_STATUS__POISONED_PF__SHIFT 0x1f
#define UVD_RAS_JPEG0_STATUS__POISONED_VF_MASK 0x7FFFFFFFL
#define UVD_RAS_JPEG0_STATUS__POISONED_PF_MASK 0x80000000L
//UVD_RAS_JPEG1_STATUS
#define UVD_RAS_JPEG1_STATUS__POISONED_VF__SHIFT 0x0
#define UVD_RAS_JPEG1_STATUS__POISONED_PF__SHIFT 0x1f
#define UVD_RAS_JPEG1_STATUS__POISONED_VF_MASK 0x7FFFFFFFL
#define UVD_RAS_JPEG1_STATUS__POISONED_PF_MASK 0x80000000L
//UVD_SCRATCH15
#define UVD_SCRATCH15__SCRATCH15_DATA__SHIFT 0x0
#define UVD_SCRATCH15__SCRATCH15_DATA_MASK 0xFFFFFFFFL
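
These field definitions back the poison-query helpers added earlier in the series (vcn_v4_0_3_query_poison_by_instance() and the VCN 5.0.1 equivalent): POISONED_PF is the single bit at position 31 and POISONED_VF covers bits 30:0. A stand-alone demo of decoding the two fields with the shift/mask values from this hunk follows; the sample register value is made up.

/* Decode UVD_RAS_VCPU_VCODEC_STATUS fields; sample value is hypothetical. */
#include <stdio.h>
#include <stdint.h>

#define POISONED_VF_SHIFT 0x0
#define POISONED_VF_MASK  0x7FFFFFFFu
#define POISONED_PF_SHIFT 0x1f
#define POISONED_PF_MASK  0x80000000u

int main(void)
{
	uint32_t reg = 0x80000004u;     /* hypothetical register read-back */

	uint32_t pf = (reg & POISONED_PF_MASK) >> POISONED_PF_SHIFT;
	uint32_t vf = (reg & POISONED_VF_MASK) >> POISONED_VF_SHIFT;

	printf("POISONED_PF = %u\n", pf);       /* PF poison bit */
	printf("POISONED_VF = 0x%08X\n", vf);   /* per-VF poison bits */
	return 0;
}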


@ -6017,7 +6017,7 @@ typedef struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_7
#define SYS_INFO_GPUCAPS__TMDSHDMI_COHERENT_SINGLEPLL_MODE 0x01
#define SYS_INFO_GPUCAPS__DP_SINGLEPLL_MODE 0x02
#define SYS_INFO_GPUCAPS__DISABLE_AUX_MODE_DETECT 0x08
#define SYS_INFO_GPUCAPS__ENABEL_DFS_BYPASS 0x10
#define SYS_INFO_GPUCAPS__ENABLE_DFS_BYPASS 0x10
//ulGPUCapInfo[16]=1 indicate SMC firmware is able to support GNB fast resume function, so that driver can call SMC to program most of GNB register during resuming, from ML
#define SYS_INFO_GPUCAPS__GNB_FAST_RESUME_CAPABLE 0x00010000
@ -6460,7 +6460,7 @@ typedef struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_9
// ulGPUCapInfo
#define SYS_INFO_V1_9_GPUCAPSINFO_DISABLE_AUX_MODE_DETECT 0x08
#define SYS_INFO_V1_9_GPUCAPSINFO_ENABEL_DFS_BYPASS 0x10
#define SYS_INFO_V1_9_GPUCAPSINFO_ENABLE_DFS_BYPASS 0x10
//ulGPUCapInfo[16]=1 indicate SMC firmware is able to support GNB fast resume function, so that driver can call SMC to program most of GNB register during resuming, from ML
#define SYS_INFO_V1_9_GPUCAPSINFO_GNB_FAST_RESUME_CAPABLE 0x00010000
//ulGPUCapInfo[18]=1 indicate the IOMMU is not available


@ -1714,7 +1714,7 @@ enum atom_system_vbiosmisc_def{
// gpucapinfo
enum atom_system_gpucapinf_def{
SYS_INFO_GPUCAPS__ENABEL_DFS_BYPASS = 0x10,
SYS_INFO_GPUCAPS__ENABLE_DFS_BYPASS = 0x10,
};
//dpphy_override


@ -48,6 +48,7 @@
#define GFX_11_0_0__SRCID__SDMA_SRAM_ECC 64 // 0x40 SRAM ECC Error
#define GFX_11_0_0__SRCID__SDMA_SEM_INCOMPLETE_TIMEOUT 65 // 0x41 GPF(Sem incomplete timeout)
#define GFX_11_0_0__SRCID__SDMA_SEM_WAIT_FAIL_TIMEOUT 66 // 0x42 Semaphore wait fail timeout
#define GFX_11_0_0__SRCID__SDMA_FENCE 67 // 0x43 User fence
#define GFX_11_0_0__SRCID__RLC_GC_FED_INTERRUPT 128 // 0x80 FED Interrupt (for data poisoning)


@ -0,0 +1,74 @@
/* SPDX-License-Identifier: MIT */
/*
* Copyright 2024 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#ifndef __IRQSRCS_GFX_12_0_0_H__
#define __IRQSRCS_GFX_12_0_0_H__
#define GFX_12_0_0__SRCID__UTCL2_FAULT 0 // UTCL2 has encountered a fault or retry scenario
#define GFX_12_0_0__SRCID__UTCL2_DATA_POISONING 1 // UTCL2 for data poisoning
#define GFX_12_0_0__SRCID__MEM_ACCES_MON 10 // 0x0A EA memory access monitor interrupt
#define GFX_12_0_0__SRCID__SDMA_ATOMIC_RTN_DONE 48 // 0x30 SDMA atomic*_rtn ops complete
#define GFX_12_0_0__SRCID__SDMA_TRAP 49 // 0x31 Trap
#define GFX_12_0_0__SRCID__SDMA_SRBMWRITE 50 // 0x32 SRBM write Protection
#define GFX_12_0_0__SRCID__SDMA_CTXEMPTY 51 // 0x33 Context Empty
#define GFX_12_0_0__SRCID__SDMA_PREEMPT 52 // 0x34 SDMA New Run List
#define GFX_12_0_0__SRCID__SDMA_IB_PREEMPT 53 // 0x35 sdma mid - command buffer preempt interrupt
#define GFX_12_0_0__SRCID__SDMA_DOORBELL_INVALID 54 // 0x36 Doorbell BE invalid
#define GFX_12_0_0__SRCID__SDMA_QUEUE_HANG 55 // 0x37 Queue hang or Command timeout
#define GFX_12_0_0__SRCID__SDMA_ATOMIC_TIMEOUT 56 // 0x38 SDMA atomic CMPSWAP loop timeout
#define GFX_12_0_0__SRCID__SDMA_POLL_TIMEOUT 57 // 0x39 SRBM read poll timeout
#define GFX_12_0_0__SRCID__SDMA_PAGE_TIMEOUT 58 // 0x3A Page retry timeout after UTCL2 return nack = 1
#define GFX_12_0_0__SRCID__SDMA_PAGE_NULL 59 // 0x3B Page Null from UTCL2 when nack = 2
#define GFX_12_0_0__SRCID__SDMA_PAGE_FAULT 60 // 0x3C Page Fault Error from UTCL2 when nack = 3
#define GFX_12_0_0__SRCID__SDMA_VM_HOLE 61 // 0x3D MC or SEM address in VM hole
#define GFX_12_0_0__SRCID__SDMA_ECC 62 // 0x3E ECC Error
#define GFX_12_0_0__SRCID__SDMA_FROZEN 63 // 0x3F SDMA Frozen
#define GFX_12_0_0__SRCID__SDMA_SRAM_ECC 64 // 0x40 SRAM ECC Error
#define GFX_12_0_0__SRCID__SDMA_SEM_INCOMPLETE_TIMEOUT 65 // 0x41 GPF(Sem incomplete timeout)
#define GFX_12_0_0__SRCID__SDMA_SEM_WAIT_FAIL_TIMEOUT 66 // 0x42 Semaphore wait fail timeout
#define GFX_12_0_0__SRCID__SDMA_FENCE 70 // 0x46 User fence
#define GFX_12_0_0__SRCID__RLC_GC_FED_INTERRUPT 128 // 0x80 FED Interrupt (for data poisoning)
#define GFX_12_0_0__SRCID__CP_GENERIC_INT 177 // 0xB1 CP_GENERIC int
#define GFX_12_0_0__SRCID__CP_PM4_PKT_RSVD_BIT_ERROR 180 // 0xB4 PM4 Pkt Rsvd Bits Error
#define GFX_12_0_0__SRCID__CP_EOP_INTERRUPT 181 // 0xB5 End-of-Pipe Interrupt
#define GFX_12_0_0__SRCID__CP_BAD_OPCODE_ERROR 183 // 0xB7 Bad Opcode Error
#define GFX_12_0_0__SRCID__CP_PRIV_REG_FAULT 184 // 0xB8 Privileged Register Fault
#define GFX_12_0_0__SRCID__CP_PRIV_INSTR_FAULT 185 // 0xB9 Privileged Instr Fault
#define GFX_12_0_0__SRCID__CP_WAIT_MEM_SEM_FAULT 186 // 0xBA Wait Memory Semaphore Fault (Sync Object Fault)
#define GFX_12_0_0__SRCID__CP_CTX_EMPTY_INTERRUPT 187 // 0xBB Context Empty Interrupt
#define GFX_12_0_0__SRCID__CP_CTX_BUSY_INTERRUPT 188 // 0xBC Context Busy Interrupt
#define GFX_12_0_0__SRCID__CP_ME_WAIT_REG_MEM_POLL_TIMEOUT 192 // 0xC0 CP.ME Wait_Reg_Mem Poll Timeout
#define GFX_12_0_0__SRCID__CP_SIG_INCOMPLETE 193 // 0xC1 "Surface Probe Fault Signal Incomplete"
#define GFX_12_0_0__SRCID__CP_PREEMPT_ACK 194 // 0xC2 Preemption Ack-wledge
#define GFX_12_0_0__SRCID__CP_GPF 195 // 0xC3 General Protection Fault (GPF)
#define GFX_12_0_0__SRCID__CP_GDS_ALLOC_ERROR 196 // 0xC4 GDS Alloc Error
#define GFX_12_0_0__SRCID__CP_ECC_ERROR 197 // 0xC5 ECC Error
#define GFX_12_0_0__SRCID__CP_COMPUTE_QUERY_STATUS 199 // 0xC7 Compute query status
#define GFX_12_0_0__SRCID__CP_VM_DOORBELL 200 // 0xC8 Unattached VM Doorbell Received
#define GFX_12_0_0__SRCID__CP_FUE_ERROR 201 // 0xC9 ECC FUE Error
#define GFX_12_0_0__SRCID__RLC_STRM_PERF_MONITOR_INTERRUPT 202 // 0xCA Streaming Perf Monitor Interrupt
#define GFX_12_0_0__SRCID__GRBM_RD_TIMEOUT_ERROR 232 // 0xE8 CRead timeout error
#define GFX_12_0_0__SRCID__GRBM_REG_GUI_IDLE 233 // 0xE9 Register GUI Idle
#define GFX_12_0_0__SRCID__SQ_INTERRUPT_ID 239 // 0xEF SQ Interrupt (ttrace wrap, errors)
#endif
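
The new source IDs feed the same IH registration path as earlier GFX generations. Below is a minimal, illustrative sketch of hooking one of them up with amdgpu_irq_add_id(); the handler, the helper names, the include path for the new header and the choice of SOC21_IH_CLIENTID_GRBM_CP as the IH client are assumptions made for the example, not code from this series.

#include "amdgpu.h"
#include "amdgpu_irq.h"
#include "ivsrcid/gfx/irqsrcs_gfx_12_0_0.h"

/* Hypothetical end-of-pipe handler; a real one would signal ring fences. */
static int example_eop_irq_process(struct amdgpu_device *adev,
                                   struct amdgpu_irq_src *source,
                                   struct amdgpu_iv_entry *entry)
{
        /* entry->ring_id and entry->src_data[] identify the originating queue. */
        return 0;
}

static const struct amdgpu_irq_src_funcs example_eop_irq_funcs = {
        .process = example_eop_irq_process,
};

/* Registers the CP end-of-pipe interrupt under the new GFX 12 source ID. */
static int example_gfx12_irq_init(struct amdgpu_device *adev,
                                  struct amdgpu_irq_src *eop_irq)
{
        eop_irq->num_types = 1;
        eop_irq->funcs = &example_eop_irq_funcs;

        return amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GRBM_CP,
                                 GFX_12_0_0__SRCID__CP_EOP_INTERRUPT, eop_irq);
}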

@@ -494,6 +494,7 @@ struct amd_pm_funcs {
        int (*set_df_cstate)(void *handle, enum pp_df_cstate state);
        int (*set_xgmi_pstate)(void *handle, uint32_t pstate);
        ssize_t (*get_gpu_metrics)(void *handle, void **table);
        ssize_t (*get_xcp_metrics)(void *handle, int xcp_id, void *table);
        ssize_t (*get_pm_metrics)(void *handle, void *pmmetrics, size_t size);
        int (*set_watermarks_for_clock_ranges)(void *handle,
                                               struct pp_smu_wm_range_sets *ranges);
@@ -1592,4 +1593,27 @@ struct amdgpu_pm_metrics {
        uint8_t data[];
};

struct amdgpu_partition_metrics_v1_0 {
        struct metrics_table_header common_header;
        /* Current clocks (Mhz) */
        uint16_t current_gfxclk[MAX_XCC];
        uint16_t current_socclk[MAX_CLKS];
        uint16_t current_vclk0[MAX_CLKS];
        uint16_t current_dclk0[MAX_CLKS];
        uint16_t current_uclk;
        uint16_t padding;
        /* Utilization Instantaneous (%) */
        uint32_t gfx_busy_inst[MAX_XCC];
        uint16_t jpeg_busy[NUM_JPEG_ENG_V1];
        uint16_t vcn_busy[NUM_VCN];
        /* Utilization Accumulated (%) */
        uint64_t gfx_busy_acc[MAX_XCC];
        /* Total App Clock Counter Accumulated */
        uint64_t gfx_below_host_limit_ppt_acc[MAX_XCC];
        uint64_t gfx_below_host_limit_thm_acc[MAX_XCC];
        uint64_t gfx_low_utilization_acc[MAX_XCC];
        uint64_t gfx_below_host_limit_total_acc[MAX_XCC];
};

#endif
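
This table is handed out through the same raw gpu_metrics-style buffer interface as the other tables in this header, so a consumer would normally validate the common header before trusting the payload. A small illustrative check follows; the helper name is hypothetical, and the format/content revision values assumed for "v1.0" (1 and 0) follow from the struct name rather than anything stated in this diff.

#include "kgd_pp_interface.h"

/* Sketch: verify length and header before interpreting a partition metrics blob. */
static bool example_partition_metrics_v1_0_ok(const void *buf, size_t len)
{
        const struct amdgpu_partition_metrics_v1_0 *m = buf;

        if (len < sizeof(*m) || m->common_header.structure_size < sizeof(*m))
                return false;

        /* Assumed revision tags for the v1.0 layout. */
        return m->common_header.format_revision == 1 &&
               m->common_header.content_revision == 0;
}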

@@ -1697,6 +1697,28 @@ int amdgpu_dpm_is_overdrive_supported(struct amdgpu_device *adev)
        }
}

int amdgpu_dpm_is_overdrive_enabled(struct amdgpu_device *adev)
{
        if (is_support_sw_smu(adev)) {
                struct smu_context *smu = adev->powerplay.pp_handle;

                return smu->od_enabled;
        } else {
                struct pp_hwmgr *hwmgr;

                /*
                 * dpm on some legacy asics don't carry od_enabled member
                 * as its pp_handle is casted directly from adev.
                 */
                if (amdgpu_dpm_is_legacy_dpm(adev))
                        return false;

                hwmgr = (struct pp_hwmgr *)adev->powerplay.pp_handle;

                return hwmgr->od_enabled;
        }
}
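
A hypothetical caller-side use of the new helper, for instance to decide whether overdrive-only interfaces should be exposed at all (the wrapper name below is illustrative, not part of this series):

/* Sketch: OD paths only make sense if OD is both supported and enabled. */
static bool example_od_interface_usable(struct amdgpu_device *adev)
{
        return amdgpu_dpm_is_overdrive_supported(adev) &&
               amdgpu_dpm_is_overdrive_enabled(adev);
}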
int amdgpu_dpm_set_pp_table(struct amdgpu_device *adev,
                            const char *buf,
                            size_t size)
@@ -2019,3 +2041,35 @@ int amdgpu_dpm_get_dpm_clock_table(struct amdgpu_device *adev,
        return ret;
}

/**
 * amdgpu_dpm_get_xcp_metrics - Retrieve metrics for a specific compute
 * partition
 * @adev: Pointer to the device.
 * @xcp_id: Identifier of the XCP for which metrics are to be retrieved.
 * @table: Pointer to a buffer where the metrics will be stored. If NULL, the
 * function returns the size of the metrics structure.
 *
 * This function retrieves metrics for a specific XCP, including details such as
 * VCN/JPEG activity, clock frequencies, and other performance metrics. If the
 * table parameter is NULL, the function returns the size of the metrics
 * structure without populating it.
 *
 * Return: Size of the metrics structure on success, or a negative error code on failure.
 */
ssize_t amdgpu_dpm_get_xcp_metrics(struct amdgpu_device *adev, int xcp_id,
                                   void *table)
{
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
        int ret = 0;

        if (!pp_funcs->get_xcp_metrics)
                return 0;

        mutex_lock(&adev->pm.mutex);
        ret = pp_funcs->get_xcp_metrics(adev->powerplay.pp_handle, xcp_id,
                                        table);
        mutex_unlock(&adev->pm.mutex);

        return ret;
}
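
Given the NULL-table convention described in the kerneldoc above, a caller can size the buffer first and then fetch the data. A simplified, illustrative sketch (the helper name, allocation strategy and error handling are mine, not from this series):

#include <linux/slab.h>

static ssize_t example_read_xcp_metrics(struct amdgpu_device *adev, int xcp_id,
                                        void **out)
{
        ssize_t size, ret;
        void *buf;

        /* The first call with a NULL table only reports the required size. */
        size = amdgpu_dpm_get_xcp_metrics(adev, xcp_id, NULL);
        if (size <= 0)
                return size;

        buf = kzalloc(size, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        ret = amdgpu_dpm_get_xcp_metrics(adev, xcp_id, buf);
        if (ret < 0) {
                kfree(buf);
                return ret;
        }

        *out = buf;
        return ret;
}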

@@ -524,6 +524,8 @@ int amdgpu_dpm_get_power_profile_mode(struct amdgpu_device *adev,
int amdgpu_dpm_set_power_profile_mode(struct amdgpu_device *adev,
                                      long *input, uint32_t size);
int amdgpu_dpm_get_gpu_metrics(struct amdgpu_device *adev, void **table);
ssize_t amdgpu_dpm_get_xcp_metrics(struct amdgpu_device *adev, int xcp_id,
                                   void *table);
/**
* @get_pm_metrics: Get one snapshot of power management metrics from PMFW. The
@@ -561,6 +563,7 @@ int amdgpu_dpm_get_smu_prv_buf_details(struct amdgpu_device *adev,
                                       void **addr,
                                       size_t *size);
int amdgpu_dpm_is_overdrive_supported(struct amdgpu_device *adev);
int amdgpu_dpm_is_overdrive_enabled(struct amdgpu_device *adev);
int amdgpu_dpm_set_pp_table(struct amdgpu_device *adev,
                            const char *buf,
                            size_t size);

@@ -2594,7 +2594,7 @@ static int kv_parse_sys_info_table(struct amdgpu_device *adev)
                                le32_to_cpu(igp_info->info_8.ulNbpStateNClkFreq[i]);
                }
                if (le32_to_cpu(igp_info->info_8.ulGPUCapInfo) &
-                   SYS_INFO_GPUCAPS__ENABEL_DFS_BYPASS)
+                   SYS_INFO_GPUCAPS__ENABLE_DFS_BYPASS)
                        pi->caps_enable_dfs_bypass = true;
                sumo_construct_sclk_voltage_mapping_table(adev,

@@ -394,7 +394,7 @@ static int smu8_get_system_info_data(struct pp_hwmgr *hwmgr)
        }

        if (le32_to_cpu(info->ulGPUCapInfo) &
-           SYS_INFO_GPUCAPS__ENABEL_DFS_BYPASS) {
+           SYS_INFO_GPUCAPS__ENABLE_DFS_BYPASS) {
                phm_cap_set(hwmgr->platform_descriptor.platformCaps,
                            PHM_PlatformCaps_EnableDFSBypass);
        }

@@ -3758,6 +3758,19 @@ int smu_set_pm_policy(struct smu_context *smu, enum pp_pm_policy p_type,
        return ret;
}

static ssize_t smu_sys_get_xcp_metrics(void *handle, int xcp_id, void *table)
{
        struct smu_context *smu = handle;

        if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
                return -EOPNOTSUPP;

        if (!smu->adev->xcp_mgr || !smu->ppt_funcs->get_xcp_metrics)
                return -EOPNOTSUPP;

        return smu->ppt_funcs->get_xcp_metrics(smu, xcp_id, table);
}

static const struct amd_pm_funcs swsmu_pm_funcs = {
        /* export for sysfs */
        .set_fan_control_mode = smu_set_fan_control_mode,
@@ -3816,6 +3829,7 @@ static const struct amd_pm_funcs swsmu_pm_funcs = {
        .get_uclk_dpm_states = smu_get_uclk_dpm_states,
        .get_dpm_clock_table = smu_get_dpm_clock_table,
        .get_smu_prv_buf_details = smu_get_prv_buffer_details,
        .get_xcp_metrics = smu_sys_get_xcp_metrics,
};
int smu_wait_for_event(struct smu_context *smu, enum smu_event_type event,

@@ -1466,6 +1466,12 @@ struct pptable_funcs {
         */
        int (*set_wbrf_exclusion_ranges)(struct smu_context *smu,
                                         struct freq_band_range *exclusion_ranges);

        /**
         * @get_xcp_metrics: Get a copy of the partition metrics table from SMU.
         * Return: Size of table
         */
        ssize_t (*get_xcp_metrics)(struct smu_context *smu, int xcp_id,
                                   void *table);
};
typedef enum {
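
For context on the new pptable_funcs->get_xcp_metrics hook above, a purely illustrative per-ASIC stub is sketched below; the "return the table size when table is NULL" convention mirrors the amdgpu_dpm_get_xcp_metrics() kerneldoc earlier in this diff and is an assumption here, as is the use of the new partition metrics struct as the payload.

static ssize_t example_get_xcp_metrics(struct smu_context *smu, int xcp_id,
                                       void *table)
{
        struct amdgpu_partition_metrics_v1_0 metrics = { };

        if (!table)
                return sizeof(metrics);

        /* A real implementation would fill per-partition clocks and
         * utilization for xcp_id from the firmware metrics table. */
        memcpy(table, &metrics, sizeof(metrics));

        return sizeof(metrics);
}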

@@ -127,7 +127,7 @@ typedef enum {
        VOLTAGE_GUARDBAND_COUNT
} GFX_GUARDBAND_e;

-#define SMU_METRICS_TABLE_VERSION 0x10
+#define SMU_METRICS_TABLE_VERSION 0x11

// Unified metrics table for smu_v13_0_6
typedef struct __attribute__((packed, aligned(4))) {
@@ -463,6 +463,8 @@ typedef struct __attribute__((packed, aligned(4))) {
typedef struct {
        // Telemetry
        uint32_t InputTelemetryVoltageInmV;
        // General info
        uint32_t pldmVersion[2];
} StaticMetricsTable_t;
#pragma pack(pop)

Some files were not shown because too many files have changed in this diff.