Merge tag 'drm-xe-fixes-2025-07-11' of https://gitlab.freedesktop.org/drm/xe/kernel into drm-fixes

Driver Changes:
- Clear LMTT page to avoid leaking data from one VF to another
- Align PF queue size to power of 2
- Disable Indirect Ring State to avoid intermittent issues on context
  switch: the feature is not currently needed, so it can be disabled for now.
- Fix compression handling when the BO pages are very fragmented
- Restore display pm on error path
- Fix runtime pm handling in xe devcoredump
- Fix xe_pm_set_vram_threshold() doc
- Recommend new minor versions of GuC firmware
- Drop some workarounds on VF
- Do not use verbose GuC logging by default: it should only be enabled for
  debugging

Signed-off-by: Simona Vetter <simona.vetter@ffwll.ch>
From: Lucas De Marchi <lucas.demarchi@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/s6jyd24mimbzb4vxtgc5vupvbyqplfep2c6eupue7znnlbhuxy@lmvzexfzhrnn
Simona Vetter 2025-07-11 11:35:39 +02:00
commit 14e85fabee
9 changed files with 53 additions and 23 deletions


@@ -171,14 +171,32 @@ static void xe_devcoredump_snapshot_free(struct xe_devcoredump_snapshot *ss)
#define XE_DEVCOREDUMP_CHUNK_MAX (SZ_512M + SZ_1G)
/**
* xe_devcoredump_read() - Read data from the Xe device coredump snapshot
* @buffer: Destination buffer to copy the coredump data into
* @offset: Offset in the coredump data to start reading from
* @count: Number of bytes to read
* @data: Pointer to the xe_devcoredump structure
* @datalen: Length of the data (unused)
*
* Reads a chunk of the coredump snapshot data into the provided buffer.
* If the devcoredump is smaller than 1.5 GB (XE_DEVCOREDUMP_CHUNK_MAX),
* it is read directly from a pre-written buffer. For larger devcoredumps,
* the pre-written buffer must be periodically repopulated from the snapshot
* state due to kmalloc size limitations.
*
* Return: Number of bytes copied on success, or a negative error code on failure.
*/
static ssize_t xe_devcoredump_read(char *buffer, loff_t offset,
size_t count, void *data, size_t datalen)
{
struct xe_devcoredump *coredump = data;
struct xe_devcoredump_snapshot *ss;
ssize_t byte_copied;
ssize_t byte_copied = 0;
u32 chunk_offset;
ssize_t new_chunk_position;
bool pm_needed = false;
int ret = 0;
if (!coredump)
return -ENODEV;
@@ -188,20 +206,19 @@ static ssize_t xe_devcoredump_read(char *buffer, loff_t offset,
/* Ensure delayed work is captured before continuing */
flush_work(&ss->work);
if (ss->read.size > XE_DEVCOREDUMP_CHUNK_MAX)
pm_needed = ss->read.size > XE_DEVCOREDUMP_CHUNK_MAX;
if (pm_needed)
xe_pm_runtime_get(gt_to_xe(ss->gt));
mutex_lock(&coredump->lock);
if (!ss->read.buffer) {
mutex_unlock(&coredump->lock);
return -ENODEV;
ret = -ENODEV;
goto unlock;
}
if (offset >= ss->read.size) {
mutex_unlock(&coredump->lock);
return 0;
}
if (offset >= ss->read.size)
goto unlock;
new_chunk_position = div_u64_rem(offset,
XE_DEVCOREDUMP_CHUNK_MAX,
@@ -221,12 +238,13 @@ static ssize_t xe_devcoredump_read(char *buffer, loff_t offset,
ss->read.size - offset;
memcpy(buffer, ss->read.buffer + chunk_offset, byte_copied);
unlock:
mutex_unlock(&coredump->lock);
if (ss->read.size > XE_DEVCOREDUMP_CHUNK_MAX)
if (pm_needed)
xe_pm_runtime_put(gt_to_xe(ss->gt));
return byte_copied;
return byte_copied ? byte_copied : ret;
}
static void xe_devcoredump_free(void *data)
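
To make the chunking described in the new kernel-doc concrete, here is a minimal standalone C sketch of the same arithmetic: the absolute read offset is split into a chunk index plus an offset inside the current 1.5 GB chunk, and the copy is clamped to both the chunk end and the snapshot size. The names (CHUNK_MAX, chunk_split, read_from_chunk) are invented for the sketch; the driver uses XE_DEVCOREDUMP_CHUNK_MAX and div_u64_rem().

#include <stdint.h>
#include <stddef.h>
#include <string.h>

#define CHUNK_MAX ((uint64_t)1536 << 20)	/* 1.5 GB, mirrors XE_DEVCOREDUMP_CHUNK_MAX */

/* Split an absolute offset into (chunk index, offset inside that chunk). */
static uint64_t chunk_split(uint64_t offset, uint32_t *chunk_offset)
{
	*chunk_offset = (uint32_t)(offset % CHUNK_MAX);
	return offset / CHUNK_MAX;
}

/* Copy up to 'count' bytes from the currently populated chunk buffer,
 * clamped to the end of the chunk and to the total snapshot size. */
static size_t read_from_chunk(char *dst, const char *chunk_buf,
			      uint64_t total_size, uint64_t offset, size_t count)
{
	uint32_t chunk_offset;

	chunk_split(offset, &chunk_offset);
	if (offset >= total_size)
		return 0;
	if (count > total_size - offset)
		count = total_size - offset;
	if (count > CHUNK_MAX - chunk_offset)
		count = CHUNK_MAX - chunk_offset;
	memcpy(dst, chunk_buf + chunk_offset, count);
	return count;
}

For example, an offset of 2 GiB falls in chunk 1 at a chunk offset of 512 MiB.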


@@ -444,6 +444,7 @@ static int xe_alloc_pf_queue(struct xe_gt *gt, struct pf_queue *pf_queue)
#define PF_MULTIPLIER 8
pf_queue->num_dw =
(num_eus + XE_NUM_HW_ENGINES) * PF_MSG_LEN_DW * PF_MULTIPLIER;
pf_queue->num_dw = roundup_pow_of_two(pf_queue->num_dw);
#undef PF_MULTIPLIER
pf_queue->gt = gt;
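
For context on the rounding above: roundup_pow_of_two() bumps the computed dword count to the next power of two, presumably so the queue's circular-buffer index arithmetic (e.g. CIRC_SPACE()-style masking) stays valid. A small standalone sketch of the same idea, with an invented next_pow2() helper standing in for the kernel's roundup_pow_of_two():

#include <stdint.h>

/* Round v up to the next power of two (v > 0); stand-in for roundup_pow_of_two(). */
static uint32_t next_pow2(uint32_t v)
{
	v--;
	v |= v >> 1;
	v |= v >> 2;
	v |= v >> 4;
	v |= v >> 8;
	v |= v >> 16;
	return v + 1;
}

/* With a power-of-two queue size, wrapping an index is a simple mask. */
static uint32_t wrap(uint32_t idx, uint32_t size_pow2)
{
	return idx & (size_pow2 - 1);
}

For example, a computed size of 3000 dwords rounds up to 4096, and wrap(4100, 4096) == 4.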


@@ -78,6 +78,9 @@ static struct xe_lmtt_pt *lmtt_pt_alloc(struct xe_lmtt *lmtt, unsigned int level
}
lmtt_assert(lmtt, xe_bo_is_vram(bo));
lmtt_debug(lmtt, "level=%u addr=%#llx\n", level, (u64)xe_bo_main_addr(bo, XE_PAGE_SIZE));
xe_map_memset(lmtt_to_xe(lmtt), &bo->vmap, 0, 0, bo->size);
pt->level = level;
pt->bo = bo;
@@ -91,6 +94,9 @@ out:
static void lmtt_pt_free(struct xe_lmtt_pt *pt)
{
lmtt_debug(&pt->bo->tile->sriov.pf.lmtt, "level=%u addr=%llx\n",
pt->level, (u64)xe_bo_main_addr(pt->bo, XE_PAGE_SIZE));
xe_bo_unpin_map_no_vm(pt->bo);
kfree(pt);
}
@@ -226,9 +232,14 @@ static void lmtt_write_pte(struct xe_lmtt *lmtt, struct xe_lmtt_pt *pt,
switch (lmtt->ops->lmtt_pte_size(level)) {
case sizeof(u32):
lmtt_assert(lmtt, !overflows_type(pte, u32));
lmtt_assert(lmtt, !pte || !iosys_map_rd(&pt->bo->vmap, idx * sizeof(u32), u32));
xe_map_wr(lmtt_to_xe(lmtt), &pt->bo->vmap, idx * sizeof(u32), u32, pte);
break;
case sizeof(u64):
lmtt_assert(lmtt, !pte || !iosys_map_rd(&pt->bo->vmap, idx * sizeof(u64), u64));
xe_map_wr(lmtt_to_xe(lmtt), &pt->bo->vmap, idx * sizeof(u64), u64, pte);
break;
default:
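
The two LMTT changes combine a defensive pattern: newly allocated page-table memory is zeroed so a VF can never observe stale entries left by a previous owner, and a debug assert checks that a non-null PTE only ever lands in a slot that is still zero. A simplified standalone sketch of that pattern (the types and helpers here are stand-ins, not the driver's real xe_lmtt structures):

#include <assert.h>
#include <stdint.h>
#include <stdlib.h>

struct pt_page {
	uint64_t *entries;
	size_t nents;
};

/* Allocate a page-table page with zeroed entries, so no data from a
 * previous user (e.g. another VF) can leak through unwritten slots. */
static struct pt_page *pt_alloc(size_t nents)
{
	struct pt_page *pt = malloc(sizeof(*pt));

	if (!pt)
		return NULL;
	pt->entries = calloc(nents, sizeof(*pt->entries));	/* zero-initialized */
	if (!pt->entries) {
		free(pt);
		return NULL;
	}
	pt->nents = nents;
	return pt;
}

/* Writing a non-null PTE into a slot that is not zero means an existing
 * entry is being silently overwritten; catch that in debug builds. */
static void pt_write(struct pt_page *pt, size_t idx, uint64_t pte)
{
	assert(idx < pt->nents);
	assert(!pte || pt->entries[idx] == 0);
	pt->entries[idx] = pte;
}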


@@ -863,7 +863,7 @@ struct dma_fence *xe_migrate_copy(struct xe_migrate *m,
if (src_is_vram && xe_migrate_allow_identity(src_L0, &src_it))
xe_res_next(&src_it, src_L0);
else
emit_pte(m, bb, src_L0_pt, src_is_vram, copy_system_ccs,
emit_pte(m, bb, src_L0_pt, src_is_vram, copy_system_ccs || use_comp_pat,
&src_it, src_L0, src);
if (dst_is_vram && xe_migrate_allow_identity(src_L0, &dst_it))


@@ -20,7 +20,7 @@
struct xe_modparam xe_modparam = {
.probe_display = true,
.guc_log_level = 3,
.guc_log_level = IS_ENABLED(CONFIG_DRM_XE_DEBUG) ? 3 : 1,
.force_probe = CONFIG_DRM_XE_FORCE_PROBE,
.wedged_mode = 1,
.svm_notifier_size = 512,
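
The new default above is picked at compile time: IS_ENABLED(CONFIG_DRM_XE_DEBUG) evaluates to 1 only on debug builds, so verbose GuC logging (level 3) stays reserved for those and regular builds fall back to level 1. A tiny standalone mimic of the pattern (CONFIG_DEMO_DEBUG is invented for this sketch; the kernel's IS_ENABLED() additionally handles =m symbols):

/* Pretend Kconfig symbol: define to 1 in a debug build, leave it 0 otherwise. */
#ifndef CONFIG_DEMO_DEBUG
#define CONFIG_DEMO_DEBUG 0
#endif

/* Verbose logging only when the debug option is enabled at build time. */
static const int guc_log_level_default = CONFIG_DEMO_DEBUG ? 3 : 1;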


@@ -140,7 +140,6 @@ static const struct xe_graphics_desc graphics_xelpg = {
.has_asid = 1, \
.has_atomic_enable_pte_bit = 1, \
.has_flat_ccs = 1, \
.has_indirect_ring_state = 1, \
.has_range_tlb_invalidation = 1, \
.has_usm = 1, \
.has_64bit_timestamp = 1, \


@@ -134,7 +134,7 @@ int xe_pm_suspend(struct xe_device *xe)
/* FIXME: Super racey... */
err = xe_bo_evict_all(xe);
if (err)
goto err_pxp;
goto err_display;
for_each_gt(gt, xe, id) {
err = xe_gt_suspend(gt);
@@ -151,7 +151,6 @@ int xe_pm_suspend(struct xe_device *xe)
err_display:
xe_display_pm_resume(xe);
err_pxp:
xe_pxp_pm_resume(xe->pxp);
err:
drm_dbg(&xe->drm, "Device suspend failed %d\n", err);
@@ -753,11 +752,13 @@ void xe_pm_assert_unbounded_bridge(struct xe_device *xe)
}
/**
* xe_pm_set_vram_threshold - Set a vram threshold for allowing/blocking D3Cold
* xe_pm_set_vram_threshold - Set a VRAM threshold for allowing/blocking D3Cold
* @xe: xe device instance
* @threshold: VRAM size in bites for the D3cold threshold
* @threshold: VRAM size in MiB for the D3cold threshold
*
* Returns 0 for success, negative error code otherwise.
* Return:
* * 0 - success
* * -EINVAL - invalid argument
*/
int xe_pm_set_vram_threshold(struct xe_device *xe, u32 threshold)
{
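
The xe_pm_suspend() hunk above is a classic unwind-ladder fix: when buffer eviction fails, the code previously jumped past the display resume step (err_pxp), leaving display PM suspended; jumping to err_display instead undoes everything that was already done. A generic standalone sketch of the idiom, with placeholder step/undo functions rather than the driver's real calls:

/* Trivial stand-ins for the real setup/teardown steps. */
static int step_a(void) { return 0; }	/* e.g. suspend PXP */
static int step_b(void) { return 0; }	/* e.g. suspend display PM */
static int step_c(void) { return 0; }	/* e.g. evict buffer objects */
static void undo_a(void) { }
static void undo_b(void) { }

/* A failure after step N must jump to the label that undoes steps 1..N;
 * jumping further down the ladder leaks whatever those steps set up. */
static int setup_sketch(void)
{
	int err;

	err = step_a();
	if (err)
		goto err;

	err = step_b();
	if (err)
		goto err_a;

	err = step_c();
	if (err)
		goto err_b;	/* jumping to err_a here would skip undo_b() */

	return 0;

err_b:
	undo_b();
err_a:
	undo_a();
err:
	return err;
}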


@@ -114,10 +114,10 @@ struct fw_blobs_by_type {
#define XE_GT_TYPE_ANY XE_GT_TYPE_UNINITIALIZED
#define XE_GUC_FIRMWARE_DEFS(fw_def, mmp_ver, major_ver) \
fw_def(BATTLEMAGE, GT_TYPE_ANY, major_ver(xe, guc, bmg, 70, 44, 1)) \
fw_def(LUNARLAKE, GT_TYPE_ANY, major_ver(xe, guc, lnl, 70, 44, 1)) \
fw_def(BATTLEMAGE, GT_TYPE_ANY, major_ver(xe, guc, bmg, 70, 45, 2)) \
fw_def(LUNARLAKE, GT_TYPE_ANY, major_ver(xe, guc, lnl, 70, 45, 2)) \
fw_def(METEORLAKE, GT_TYPE_ANY, major_ver(i915, guc, mtl, 70, 44, 1)) \
fw_def(DG2, GT_TYPE_ANY, major_ver(i915, guc, dg2, 70, 44, 1)) \
fw_def(DG2, GT_TYPE_ANY, major_ver(i915, guc, dg2, 70, 45, 2)) \
fw_def(DG1, GT_TYPE_ANY, major_ver(i915, guc, dg1, 70, 44, 1)) \
fw_def(ALDERLAKE_N, GT_TYPE_ANY, major_ver(i915, guc, tgl, 70, 44, 1)) \
fw_def(ALDERLAKE_P, GT_TYPE_ANY, major_ver(i915, guc, adlp, 70, 44, 1)) \


@@ -38,10 +38,10 @@
GRAPHICS_VERSION(2004)
GRAPHICS_VERSION_RANGE(3000, 3001)
22019338487 MEDIA_VERSION(2000)
GRAPHICS_VERSION(2001)
GRAPHICS_VERSION(2001), FUNC(xe_rtp_match_not_sriov_vf)
MEDIA_VERSION(3000), MEDIA_STEP(A0, B0), FUNC(xe_rtp_match_not_sriov_vf)
22019338487_display PLATFORM(LUNARLAKE)
16023588340 GRAPHICS_VERSION(2001)
16023588340 GRAPHICS_VERSION(2001), FUNC(xe_rtp_match_not_sriov_vf)
14019789679 GRAPHICS_VERSION(1255)
GRAPHICS_VERSION_RANGE(1270, 2004)
no_media_l3 MEDIA_VERSION(3000)