Merge tag 'drm-fixes-2025-07-18-1' of https://gitlab.freedesktop.org/drm/kernel

Pull drm fixes from Dave Airlie:
 "Seems like a quiet enough week, xe/amdgpu being the usual suspects,
  then mediatek with a few fixes, and otherwise just misc other bits.

  dp:
   - aux dpcd address fix

  xe:
   - SR-IOV fixes for GT reset and TLB invalidation
   - Fix memory copy direction during migration
   - Fix alignment check on migration
   - Fix MOCS and page fault init order to correctly account
     for topology

  amdgpu:
   - Fix a DC memory leak
   - DCN 4.0.1 degamma LUT fix
   - Fix reset counter handling for soft recovery
   - GC 8 fix

  radeon:
   - Drop console locks when suspending/resuming

  nouveau:
   - ioctl validation fix

  panfrost:
   - scheduler bug fix

  mediatek:
   - Add wait_event_timeout when disabling plane
   - only announce AFBC if really supported
   - mtk_dpi: Reorder output formats on MT8195/88"

* tag 'drm-fixes-2025-07-18-1' of https://gitlab.freedesktop.org/drm/kernel:
  drm/mediatek: mtk_dpi: Reorder output formats on MT8195/88
  drm/mediatek: only announce AFBC if really supported
  drm/mediatek: Add wait_event_timeout when disabling plane
  drm/xe/pf: Resend PF provisioning after GT reset
  drm/xe/pf: Prepare to stop SR-IOV support prior GT reset
  drm/xe/migrate: Fix alignment check
  drm/xe: Move page fault init after topology init
  drm/xe/mocs: Initialize MOCS index early
  drm/xe/migrate: fix copy direction in access_memory
  drm/xe: Dont skip TLB invalidations on VF
  drm/amdgpu/gfx8: reset compute ring wptr on the GPU on resume
  drm/amdgpu: Increase reset counter only on success
  drm/radeon: Do not hold console lock during resume
  drm/radeon: Do not hold console lock while suspending clients
  drm/amd/display: Disable CRTC degamma LUT for DCN401
  drm/amd/display: Free memory allocation
  drm/dp: Change AUX DPCD probe address from LANE0_1_STATUS to TRAINING_PATTERN_SET
  drm/panfrost: Fix scheduler workqueue bug
  drm/nouveau: check ioctl command codes better
commit c460535a6d
Linus Torvalds, 2025-07-18 11:27:46 -07:00
23 changed files with 174 additions and 56 deletions

drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c

@@ -427,6 +427,7 @@ bool amdgpu_ring_soft_recovery(struct amdgpu_ring *ring, unsigned int vmid,
 {
 	unsigned long flags;
 	ktime_t deadline;
+	bool ret;
 
 	if (unlikely(ring->adev->debug_disable_soft_recovery))
 		return false;
@@ -441,12 +442,16 @@ bool amdgpu_ring_soft_recovery(struct amdgpu_ring *ring, unsigned int vmid,
 	dma_fence_set_error(fence, -ENODATA);
 	spin_unlock_irqrestore(fence->lock, flags);
 
-	atomic_inc(&ring->adev->gpu_reset_counter);
 	while (!dma_fence_is_signaled(fence) &&
 	       ktime_to_ns(ktime_sub(deadline, ktime_get())) > 0)
 		ring->funcs->soft_recovery(ring, vmid);
 
-	return dma_fence_is_signaled(fence);
+	ret = dma_fence_is_signaled(fence);
+	/* increment the counter only if soft reset worked */
+	if (ret)
+		atomic_inc(&ring->adev->gpu_reset_counter);
+
+	return ret;
 }
 
 /*
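The change defers the gpu_reset_counter increment until the fence is known to have signaled. A standalone sketch of that success-only counting pattern, using C11 atomics; try_soft_recovery() and the counter here are illustrative stand-ins, not the amdgpu API:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    static atomic_uint gpu_reset_counter;

    /* stand-in for the fence polling loop; pretend recovery succeeded */
    static bool try_soft_recovery(void)
    {
            return true;
    }

    int main(void)
    {
            bool ok = try_soft_recovery();

            /* count the reset only once the outcome is known, as in the fix */
            if (ok)
                    atomic_fetch_add(&gpu_reset_counter, 1);

            printf("resets recorded: %u\n", atomic_load(&gpu_reset_counter));
            return ok ? 0 : 1;
    }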

drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c

@@ -4640,6 +4640,7 @@ static int gfx_v8_0_kcq_init_queue(struct amdgpu_ring *ring)
 		memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct vi_mqd_allocation));
 		/* reset ring buffer */
 		ring->wptr = 0;
+		atomic64_set((atomic64_t *)ring->wptr_cpu_addr, 0);
 		amdgpu_ring_clear_ring(ring);
 	}
 
 	return 0;
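The one-line addition matters because the ring effectively keeps two copies of the write pointer: the driver-side value and a CPU-visible copy in memory. Resetting only one leaves them out of sync across resume. A minimal sketch of the stale-copy problem; the struct and field names are illustrative, not the amdgpu ones:

    #include <stdint.h>
    #include <stdio.h>

    struct ring {
            uint64_t wptr;        /* driver-side write pointer */
            uint64_t wptr_shadow; /* CPU-visible copy read after resume */
    };

    static void ring_reset(struct ring *r, int fix)
    {
            r->wptr = 0;
            if (fix)
                    r->wptr_shadow = 0; /* analog of the added atomic64_set() */
    }

    int main(void)
    {
            struct ring r = { .wptr = 0x1234, .wptr_shadow = 0x1234 };

            ring_reset(&r, 0);
            printf("without fix: wptr=%llx shadow=%llx\n",
                   (unsigned long long)r.wptr, (unsigned long long)r.wptr_shadow);

            r = (struct ring){ .wptr = 0x1234, .wptr_shadow = 0x1234 };
            ring_reset(&r, 1);
            printf("with fix:    wptr=%llx shadow=%llx\n",
                   (unsigned long long)r.wptr, (unsigned long long)r.wptr_shadow);
            return 0;
    }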

drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c

@@ -728,7 +728,16 @@ int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
 	 * support programmable degamma anywhere.
 	 */
 	is_dcn = dm->adev->dm.dc->caps.color.dpp.dcn_arch;
-	drm_crtc_enable_color_mgmt(&acrtc->base, is_dcn ? MAX_COLOR_LUT_ENTRIES : 0,
+	/* Don't enable DRM CRTC degamma property for DCN401 since the
+	 * pre-blending degamma LUT doesn't apply to cursor, and therefore
+	 * can't work similar to a post-blending degamma LUT as in other hw
+	 * versions.
+	 * TODO: revisit it once KMS plane color API is merged.
+	 */
+	drm_crtc_enable_color_mgmt(&acrtc->base,
+				   (is_dcn &&
+				    dm->adev->dm.dc->ctx->dce_version != DCN_VERSION_4_01) ?
+				   MAX_COLOR_LUT_ENTRIES : 0,
 				   true, MAX_COLOR_LUT_ENTRIES);
 
 	drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);

drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.c

@@ -1565,7 +1565,7 @@ struct clk_mgr_internal *dcn401_clk_mgr_construct(
 	clk_mgr->base.bw_params = kzalloc(sizeof(*clk_mgr->base.bw_params), GFP_KERNEL);
 	if (!clk_mgr->base.bw_params) {
 		BREAK_TO_DEBUGGER();
-		kfree(clk_mgr);
+		kfree(clk_mgr401);
 		return NULL;
 	}
 
@@ -1576,6 +1576,7 @@ struct clk_mgr_internal *dcn401_clk_mgr_construct(
 	if (!clk_mgr->wm_range_table) {
 		BREAK_TO_DEBUGGER();
 		kfree(clk_mgr->base.bw_params);
+		kfree(clk_mgr401);
 		return NULL;
 	}
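For context on the fix: the constructor allocates clk_mgr401, and clk_mgr is merely a pointer to a structure embedded in it, so error paths must free the enclosing allocation. A generic sketch of the interior-pointer pitfall, with illustrative type names:

    #include <stdio.h>
    #include <stdlib.h>

    struct base { int x; };

    struct container {
            int tag;
            struct base base; /* not the first member, so &c->base != (void *)c */
    };

    int main(void)
    {
            struct container *c = malloc(sizeof(*c));
            struct base *b = &c->base;

            printf("container %p, embedded base %p\n", (void *)c, (void *)b);

            /* free(b) would hand the allocator an interior pointer: undefined
             * behavior. Free the allocation that was actually made. */
            free(c);
            return 0;
    }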

drivers/gpu/drm/display/drm_dp_helper.c

@@ -725,7 +725,7 @@ ssize_t drm_dp_dpcd_read(struct drm_dp_aux *aux, unsigned int offset,
 	 * monitor doesn't power down exactly after the throw away read.
 	 */
 	if (!aux->is_remote) {
-		ret = drm_dp_dpcd_probe(aux, DP_LANE0_1_STATUS);
+		ret = drm_dp_dpcd_probe(aux, DP_TRAINING_PATTERN_SET);
 		if (ret < 0)
 			return ret;
 	}

drivers/gpu/drm/mediatek/mtk_crtc.c

@@ -719,6 +719,39 @@ int mtk_crtc_plane_check(struct drm_crtc *crtc, struct drm_plane *plane,
 	return 0;
 }
 
+void mtk_crtc_plane_disable(struct drm_crtc *crtc, struct drm_plane *plane)
+{
+#if IS_REACHABLE(CONFIG_MTK_CMDQ)
+	struct mtk_crtc *mtk_crtc = to_mtk_crtc(crtc);
+	struct mtk_plane_state *plane_state = to_mtk_plane_state(plane->state);
+	int i;
+
+	/* no need to wait for disabling the plane by CPU */
+	if (!mtk_crtc->cmdq_client.chan)
+		return;
+
+	if (!mtk_crtc->enabled)
+		return;
+
+	/* set pending plane state to disabled */
+	for (i = 0; i < mtk_crtc->layer_nr; i++) {
+		struct drm_plane *mtk_plane = &mtk_crtc->planes[i];
+		struct mtk_plane_state *mtk_plane_state = to_mtk_plane_state(mtk_plane->state);
+
+		if (mtk_plane->index == plane->index) {
+			memcpy(mtk_plane_state, plane_state, sizeof(*plane_state));
+			break;
+		}
+	}
+	mtk_crtc_update_config(mtk_crtc, false);
+
+	/* wait for planes to be disabled by CMDQ */
+	wait_event_timeout(mtk_crtc->cb_blocking_queue,
+			   mtk_crtc->cmdq_vblank_cnt == 0,
+			   msecs_to_jiffies(500));
+#endif
+}
+
 void mtk_crtc_async_update(struct drm_crtc *crtc, struct drm_plane *plane,
 			   struct drm_atomic_state *state)
 {
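The new helper flushes the disable through CMDQ and then blocks, but only for up to 500 ms, so a stuck GCE thread cannot hang the atomic commit path forever. A userspace analog of that bounded wait, assuming POSIX threads; the names only loosely mirror the kernel code:

    #include <errno.h>
    #include <pthread.h>
    #include <stdio.h>
    #include <time.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
    static int cmdq_pending = 1; /* analog of cmdq_vblank_cnt != 0 */

    /* analog of the CMDQ completion callback clearing the count */
    static void *cmdq_worker(void *arg)
    {
            (void)arg;
            pthread_mutex_lock(&lock);
            cmdq_pending = 0;
            pthread_cond_signal(&cond);
            pthread_mutex_unlock(&lock);
            return NULL;
    }

    int main(void)
    {
            struct timespec deadline;
            pthread_t tid;
            int err = 0;

            pthread_create(&tid, NULL, cmdq_worker, NULL);

            /* analog of msecs_to_jiffies(500): wait at most one second */
            clock_gettime(CLOCK_REALTIME, &deadline);
            deadline.tv_sec += 1;

            pthread_mutex_lock(&lock);
            while (cmdq_pending && err != ETIMEDOUT)
                    err = pthread_cond_timedwait(&cond, &lock, &deadline);
            pthread_mutex_unlock(&lock);

            printf("%s\n", cmdq_pending ? "timed out" : "plane disabled");
            pthread_join(tid, NULL);
            return 0;
    }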
@@ -930,7 +963,8 @@ static int mtk_crtc_init_comp_planes(struct drm_device *drm_dev,
 				mtk_ddp_comp_supported_rotations(comp),
 				mtk_ddp_comp_get_blend_modes(comp),
 				mtk_ddp_comp_get_formats(comp),
-				mtk_ddp_comp_get_num_formats(comp), i);
+				mtk_ddp_comp_get_num_formats(comp),
+				mtk_ddp_comp_is_afbc_supported(comp), i);
 		if (ret)
 			return ret;

drivers/gpu/drm/mediatek/mtk_crtc.h

@@ -21,6 +21,7 @@ int mtk_crtc_create(struct drm_device *drm_dev, const unsigned int *path,
 		    unsigned int num_conn_routes);
 int mtk_crtc_plane_check(struct drm_crtc *crtc, struct drm_plane *plane,
 			 struct mtk_plane_state *state);
+void mtk_crtc_plane_disable(struct drm_crtc *crtc, struct drm_plane *plane);
 void mtk_crtc_async_update(struct drm_crtc *crtc, struct drm_plane *plane,
 			   struct drm_atomic_state *plane_state);
 struct device *mtk_crtc_dma_dev_get(struct drm_crtc *crtc);

drivers/gpu/drm/mediatek/mtk_ddp_comp.c

@@ -366,6 +366,7 @@ static const struct mtk_ddp_comp_funcs ddp_ovl = {
 	.get_blend_modes = mtk_ovl_get_blend_modes,
 	.get_formats = mtk_ovl_get_formats,
 	.get_num_formats = mtk_ovl_get_num_formats,
+	.is_afbc_supported = mtk_ovl_is_afbc_supported,
 };
 
 static const struct mtk_ddp_comp_funcs ddp_postmask = {

drivers/gpu/drm/mediatek/mtk_ddp_comp.h

@@ -83,6 +83,7 @@ struct mtk_ddp_comp_funcs {
 	u32 (*get_blend_modes)(struct device *dev);
 	const u32 *(*get_formats)(struct device *dev);
 	size_t (*get_num_formats)(struct device *dev);
+	bool (*is_afbc_supported)(struct device *dev);
 	void (*connect)(struct device *dev, struct device *mmsys_dev, unsigned int next);
 	void (*disconnect)(struct device *dev, struct device *mmsys_dev, unsigned int next);
 	void (*add)(struct device *dev, struct mtk_mutex *mutex);
@@ -294,6 +295,14 @@ size_t mtk_ddp_comp_get_num_formats(struct mtk_ddp_comp *comp)
 	return 0;
 }
 
+static inline bool mtk_ddp_comp_is_afbc_supported(struct mtk_ddp_comp *comp)
+{
+	if (comp->funcs && comp->funcs->is_afbc_supported)
+		return comp->funcs->is_afbc_supported(comp->dev);
+
+	return false;
+}
+
 static inline bool mtk_ddp_comp_add(struct mtk_ddp_comp *comp, struct mtk_mutex *mutex)
 {
 	if (comp->funcs && comp->funcs->add) {
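The wrapper follows the driver's usual optional-callback convention: a component that doesn't implement the hook gets a safe default instead of a NULL dereference, which is why AFBC is only announced when really supported. A self-contained sketch of the pattern, with illustrative type names:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    struct comp_funcs {
            bool (*is_afbc_supported)(void *dev);
    };

    struct comp {
            const struct comp_funcs *funcs;
            void *dev;
    };

    static bool comp_is_afbc_supported(const struct comp *comp)
    {
            if (comp->funcs && comp->funcs->is_afbc_supported)
                    return comp->funcs->is_afbc_supported(comp->dev);

            return false; /* conservative default: don't announce AFBC */
    }

    int main(void)
    {
            struct comp no_hooks = { .funcs = NULL, .dev = NULL };

            printf("afbc: %d\n", comp_is_afbc_supported(&no_hooks)); /* 0 */
            return 0;
    }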

drivers/gpu/drm/mediatek/mtk_disp_drv.h

@@ -106,6 +106,7 @@ void mtk_ovl_disable_vblank(struct device *dev);
 u32 mtk_ovl_get_blend_modes(struct device *dev);
 const u32 *mtk_ovl_get_formats(struct device *dev);
 size_t mtk_ovl_get_num_formats(struct device *dev);
+bool mtk_ovl_is_afbc_supported(struct device *dev);
 
 void mtk_ovl_adaptor_add_comp(struct device *dev, struct mtk_mutex *mutex);
 void mtk_ovl_adaptor_remove_comp(struct device *dev, struct mtk_mutex *mutex);

drivers/gpu/drm/mediatek/mtk_disp_ovl.c

@@ -236,6 +236,13 @@ size_t mtk_ovl_get_num_formats(struct device *dev)
 	return ovl->data->num_formats;
 }
 
+bool mtk_ovl_is_afbc_supported(struct device *dev)
+{
+	struct mtk_disp_ovl *ovl = dev_get_drvdata(dev);
+
+	return ovl->data->supports_afbc;
+}
+
 int mtk_ovl_clk_enable(struct device *dev)
 {
 	struct mtk_disp_ovl *ovl = dev_get_drvdata(dev);

drivers/gpu/drm/mediatek/mtk_dpi.c

@@ -1095,7 +1095,6 @@ static const u32 mt8183_output_fmts[] = {
 };
 
 static const u32 mt8195_dpi_output_fmts[] = {
-	MEDIA_BUS_FMT_BGR888_1X24,
 	MEDIA_BUS_FMT_RGB888_1X24,
 	MEDIA_BUS_FMT_RGB888_2X12_LE,
 	MEDIA_BUS_FMT_RGB888_2X12_BE,
@@ -1103,18 +1102,19 @@ static const u32 mt8195_dpi_output_fmts[] = {
 	MEDIA_BUS_FMT_YUYV8_1X16,
 	MEDIA_BUS_FMT_YUYV10_1X20,
 	MEDIA_BUS_FMT_YUYV12_1X24,
+	MEDIA_BUS_FMT_BGR888_1X24,
 	MEDIA_BUS_FMT_YUV8_1X24,
 	MEDIA_BUS_FMT_YUV10_1X30,
 };
 
 static const u32 mt8195_dp_intf_output_fmts[] = {
-	MEDIA_BUS_FMT_BGR888_1X24,
 	MEDIA_BUS_FMT_RGB888_1X24,
 	MEDIA_BUS_FMT_RGB888_2X12_LE,
 	MEDIA_BUS_FMT_RGB888_2X12_BE,
 	MEDIA_BUS_FMT_RGB101010_1X30,
 	MEDIA_BUS_FMT_YUYV8_1X16,
 	MEDIA_BUS_FMT_YUYV10_1X20,
+	MEDIA_BUS_FMT_BGR888_1X24,
 	MEDIA_BUS_FMT_YUV8_1X24,
 	MEDIA_BUS_FMT_YUV10_1X30,
 };

drivers/gpu/drm/mediatek/mtk_plane.c

@@ -285,9 +285,14 @@ static void mtk_plane_atomic_disable(struct drm_plane *plane,
 	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
 									   plane);
 	struct mtk_plane_state *mtk_plane_state = to_mtk_plane_state(new_state);
+	struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
+									   plane);
+
 	mtk_plane_state->pending.enable = false;
 	wmb(); /* Make sure the above parameter is set before update */
 	mtk_plane_state->pending.dirty = true;
+
+	mtk_crtc_plane_disable(old_state->crtc, plane);
 }
 
 static void mtk_plane_atomic_update(struct drm_plane *plane,
@@ -321,7 +326,8 @@ static const struct drm_plane_helper_funcs mtk_plane_helper_funcs = {
 int mtk_plane_init(struct drm_device *dev, struct drm_plane *plane,
 		   unsigned long possible_crtcs, enum drm_plane_type type,
 		   unsigned int supported_rotations, const u32 blend_modes,
-		   const u32 *formats, size_t num_formats, unsigned int plane_idx)
+		   const u32 *formats, size_t num_formats,
+		   bool supports_afbc, unsigned int plane_idx)
 {
 	int err;
@@ -332,7 +338,9 @@ int mtk_plane_init(struct drm_device *dev, struct drm_plane *plane,
 
 	err = drm_universal_plane_init(dev, plane, possible_crtcs,
 				       &mtk_plane_funcs, formats,
-				       num_formats, modifiers, type, NULL);
+				       num_formats,
+				       supports_afbc ? modifiers : NULL,
+				       type, NULL);
 	if (err) {
 		DRM_ERROR("failed to initialize plane\n");
 		return err;

drivers/gpu/drm/mediatek/mtk_plane.h

@@ -49,5 +49,6 @@ to_mtk_plane_state(struct drm_plane_state *state)
 int mtk_plane_init(struct drm_device *dev, struct drm_plane *plane,
 		   unsigned long possible_crtcs, enum drm_plane_type type,
 		   unsigned int supported_rotations, const u32 blend_modes,
-		   const u32 *formats, size_t num_formats, unsigned int plane_idx);
+		   const u32 *formats, size_t num_formats,
+		   bool supports_afbc, unsigned int plane_idx);
 #endif

drivers/gpu/drm/nouveau/nouveau_drm.c

@@ -1284,6 +1284,9 @@ nouveau_ioctls[] = {
 	DRM_IOCTL_DEF_DRV(NOUVEAU_EXEC, nouveau_exec_ioctl_exec, DRM_RENDER_ALLOW),
 };
 
+#define DRM_IOCTL_NOUVEAU_NVIF _IOC(_IOC_READ | _IOC_WRITE, DRM_IOCTL_BASE, \
+				    DRM_COMMAND_BASE + DRM_NOUVEAU_NVIF, 0)
+
 long
 nouveau_drm_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 {
@@ -1297,14 +1300,10 @@ nouveau_drm_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 		return ret;
 	}
 
-	switch (_IOC_NR(cmd) - DRM_COMMAND_BASE) {
-	case DRM_NOUVEAU_NVIF:
+	if ((cmd & ~IOCSIZE_MASK) == DRM_IOCTL_NOUVEAU_NVIF)
 		ret = nouveau_abi16_ioctl(filp, (void __user *)arg, _IOC_SIZE(cmd));
-		break;
-	default:
+	else
 		ret = drm_ioctl(file, cmd, arg);
-		break;
-	}
 
 	pm_runtime_mark_last_busy(dev->dev);
 	pm_runtime_put_autosuspend(dev->dev);
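The old code matched only the ioctl number, so any command whose _IOC_NR() happened to collide with DRM_NOUVEAU_NVIF was routed to nouveau_abi16_ioctl() regardless of its direction and type bits; the new check compares everything except the size field. A userspace sketch of the difference, using the _IOC() and IOCSIZE_MASK macros from <linux/ioctl.h>; the command values are made up for illustration:

    #include <linux/ioctl.h>
    #include <stdio.h>

    #define CMD_NR 0x47 /* hypothetical command number */

    int main(void)
    {
            /* same number, different direction bits and payload size */
            unsigned int rw = _IOC(_IOC_READ | _IOC_WRITE, 'd', CMD_NR, 0);
            unsigned int wo = _IOC(_IOC_WRITE, 'd', CMD_NR, 128);

            /* the old check: only the number is compared, so these collide */
            printf("nr match:  %d\n", _IOC_NR(rw) == _IOC_NR(wo)); /* 1 */

            /* the new check: everything but the size field must match */
            printf("cmd match: %d\n",
                   (rw & ~IOCSIZE_MASK) == (wo & ~IOCSIZE_MASK)); /* 0 */
            return 0;
    }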

drivers/gpu/drm/panfrost/panfrost_job.c

@@ -841,7 +841,6 @@ int panfrost_job_init(struct panfrost_device *pfdev)
 		.num_rqs = DRM_SCHED_PRIORITY_COUNT,
 		.credit_limit = 2,
 		.timeout = msecs_to_jiffies(JOB_TIMEOUT_MS),
-		.timeout_wq = pfdev->reset.wq,
 		.name = "pan_js",
 		.dev = pfdev->dev,
 	};
@@ -879,6 +878,7 @@ int panfrost_job_init(struct panfrost_device *pfdev)
 	pfdev->reset.wq = alloc_ordered_workqueue("panfrost-reset", 0);
 	if (!pfdev->reset.wq)
 		return -ENOMEM;
+	args.timeout_wq = pfdev->reset.wq;
 
 	for (j = 0; j < NUM_JOB_SLOTS; j++) {
 		js->queue[j].fence_context = dma_fence_context_alloc(1);
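The underlying bug generalizes: a designated initializer captures the pointer's value at that moment, it does not track later assignments, so args.timeout_wq was NULL because the workqueue was only allocated further down. A minimal sketch; sched_args is a hypothetical stand-in for the scheduler init-args struct:

    #include <stdio.h>
    #include <stdlib.h>

    struct sched_args {
            void *timeout_wq;
    };

    int main(void)
    {
            void *wq = NULL; /* workqueue not allocated yet */
            struct sched_args args = { .timeout_wq = wq }; /* captures NULL */

            wq = malloc(64); /* allocated afterwards; args is unaffected */
            printf("before fix: %p\n", args.timeout_wq);

            args.timeout_wq = wq; /* the fix: assign after allocation */
            printf("after fix:  %p\n", args.timeout_wq);

            free(wq);
            return 0;
    }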

drivers/gpu/drm/radeon/radeon_device.c

@@ -26,7 +26,6 @@
  *          Jerome Glisse
  */
 
-#include <linux/console.h>
 #include <linux/efi.h>
 #include <linux/pci.h>
 #include <linux/pm_runtime.h>
@@ -1635,11 +1634,9 @@ int radeon_suspend_kms(struct drm_device *dev, bool suspend,
 		pci_set_power_state(pdev, PCI_D3hot);
 	}
 
-	if (notify_clients) {
-		console_lock();
-		drm_client_dev_suspend(dev, true);
-		console_unlock();
-	}
+	if (notify_clients)
+		drm_client_dev_suspend(dev, false);
+
 	return 0;
 }
 
@@ -1661,17 +1658,11 @@ int radeon_resume_kms(struct drm_device *dev, bool resume, bool notify_clients)
 	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
 		return 0;
 
-	if (notify_clients) {
-		console_lock();
-	}
 	if (resume) {
 		pci_set_power_state(pdev, PCI_D0);
 		pci_restore_state(pdev);
-		if (pci_enable_device(pdev)) {
-			if (notify_clients)
-				console_unlock();
+		if (pci_enable_device(pdev))
 			return -1;
-		}
 	}
 	/* resume AGP if in use */
 	radeon_agp_resume(rdev);
@@ -1747,10 +1738,8 @@ int radeon_resume_kms(struct drm_device *dev, bool resume, bool notify_clients)
 	if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled)
 		radeon_pm_compute_clocks(rdev);
 
-	if (notify_clients) {
-		drm_client_dev_resume(dev, true);
-		console_unlock();
-	}
+	if (notify_clients)
+		drm_client_dev_resume(dev, false);
 
 	return 0;
 }

drivers/gpu/drm/xe/xe_gt.c

@@ -417,6 +417,8 @@ int xe_gt_init_early(struct xe_gt *gt)
 	if (err)
 		return err;
 
+	xe_mocs_init_early(gt);
+
 	return 0;
 }
 
@@ -630,12 +632,6 @@ int xe_gt_init(struct xe_gt *gt)
 	if (err)
 		return err;
 
-	err = xe_gt_pagefault_init(gt);
-	if (err)
-		return err;
-
-	xe_mocs_init_early(gt);
-
 	err = xe_gt_sysfs_init(gt);
 	if (err)
 		return err;
@@ -644,6 +640,10 @@ int xe_gt_init(struct xe_gt *gt)
 	if (err)
 		return err;
 
+	err = xe_gt_pagefault_init(gt);
+	if (err)
+		return err;
+
 	err = xe_gt_idle_init(&gt->gtidle);
 	if (err)
 		return err;
@@ -839,6 +839,9 @@ static int gt_reset(struct xe_gt *gt)
 		goto err_out;
 	}
 
+	if (IS_SRIOV_PF(gt_to_xe(gt)))
+		xe_gt_sriov_pf_stop_prepare(gt);
+
 	xe_uc_gucrc_disable(&gt->uc);
 	xe_uc_stop_prepare(&gt->uc);
 	xe_gt_pagefault_reset(gt);

drivers/gpu/drm/xe/xe_gt_sriov_pf.c

@@ -172,6 +172,25 @@ void xe_gt_sriov_pf_sanitize_hw(struct xe_gt *gt, unsigned int vfid)
 	pf_clear_vf_scratch_regs(gt, vfid);
 }
 
+static void pf_cancel_restart(struct xe_gt *gt)
+{
+	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
+
+	if (cancel_work_sync(&gt->sriov.pf.workers.restart))
+		xe_gt_sriov_dbg_verbose(gt, "pending restart canceled!\n");
+}
+
+/**
+ * xe_gt_sriov_pf_stop_prepare() - Prepare to stop SR-IOV support.
+ * @gt: the &xe_gt
+ *
+ * This function can only be called on the PF.
+ */
+void xe_gt_sriov_pf_stop_prepare(struct xe_gt *gt)
+{
+	pf_cancel_restart(gt);
+}
+
 static void pf_restart(struct xe_gt *gt)
 {
 	struct xe_device *xe = gt_to_xe(gt);

drivers/gpu/drm/xe/xe_gt_sriov_pf.h

@@ -13,6 +13,7 @@ int xe_gt_sriov_pf_init_early(struct xe_gt *gt);
 int xe_gt_sriov_pf_init(struct xe_gt *gt);
 void xe_gt_sriov_pf_init_hw(struct xe_gt *gt);
 void xe_gt_sriov_pf_sanitize_hw(struct xe_gt *gt, unsigned int vfid);
+void xe_gt_sriov_pf_stop_prepare(struct xe_gt *gt);
 void xe_gt_sriov_pf_restart(struct xe_gt *gt);
 #else
 static inline int xe_gt_sriov_pf_init_early(struct xe_gt *gt)
@@ -29,6 +30,10 @@ static inline void xe_gt_sriov_pf_init_hw(struct xe_gt *gt)
 {
 }
 
+static inline void xe_gt_sriov_pf_stop_prepare(struct xe_gt *gt)
+{
+}
+
 static inline void xe_gt_sriov_pf_restart(struct xe_gt *gt)
 {
 }

drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c

@@ -2364,6 +2364,21 @@ int xe_gt_sriov_pf_config_restore(struct xe_gt *gt, unsigned int vfid,
 	return err;
 }
 
+static int pf_push_self_config(struct xe_gt *gt)
+{
+	int err;
+
+	err = pf_push_full_vf_config(gt, PFID);
+	if (err) {
+		xe_gt_sriov_err(gt, "Failed to push self configuration (%pe)\n",
+				ERR_PTR(err));
+		return err;
+	}
+
+	xe_gt_sriov_dbg_verbose(gt, "self configuration completed\n");
+	return 0;
+}
+
 static void fini_config(void *arg)
 {
 	struct xe_gt *gt = arg;
@@ -2387,9 +2402,17 @@ static void fini_config(void *arg)
 int xe_gt_sriov_pf_config_init(struct xe_gt *gt)
 {
 	struct xe_device *xe = gt_to_xe(gt);
+	int err;
 
 	xe_gt_assert(gt, IS_SRIOV_PF(xe));
 
+	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
+	err = pf_push_self_config(gt);
+	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
+
+	if (err)
+		return err;
+
 	return devm_add_action_or_reset(xe->drm.dev, fini_config, gt);
 }
 
@@ -2407,6 +2430,10 @@ void xe_gt_sriov_pf_config_restart(struct xe_gt *gt)
 	unsigned int n, total_vfs = xe_sriov_pf_get_totalvfs(gt_to_xe(gt));
 	unsigned int fail = 0, skip = 0;
 
+	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
+	pf_push_self_config(gt);
+	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
+
 	for (n = 1; n <= total_vfs; n++) {
 		if (xe_gt_sriov_pf_config_is_empty(gt, n))
 			skip++;

drivers/gpu/drm/xe/xe_migrate.c

@@ -1817,8 +1817,8 @@ int xe_migrate_access_memory(struct xe_migrate *m, struct xe_bo *bo,
 	xe_bo_assert_held(bo);
 
 	/* Use bounce buffer for small access and unaligned access */
-	if (len & XE_CACHELINE_MASK ||
-	    ((uintptr_t)buf | offset) & XE_CACHELINE_MASK) {
+	if (!IS_ALIGNED(len, XE_CACHELINE_BYTES) ||
+	    !IS_ALIGNED((unsigned long)buf + offset, XE_CACHELINE_BYTES)) {
 		int buf_offset = 0;
 
 		/*
@@ -1848,7 +1848,7 @@ int xe_migrate_access_memory(struct xe_migrate *m, struct xe_bo *bo,
 			err = xe_migrate_access_memory(m, bo,
 						       offset & ~XE_CACHELINE_MASK,
 						       (void *)ptr,
-						       sizeof(bounce), 0);
+						       sizeof(bounce), write);
 			if (err)
 				return err;
 		} else {
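To see what the new test accepts that the old one rejected: OR-ing buf and offset flags any case where either component is individually unaligned, while the copy only cares about the effective address buf + offset. A standalone sketch, assuming 64-byte cachelines for illustration:

    #include <stdint.h>
    #include <stdio.h>

    #define CACHELINE_BYTES 64u
    #define CACHELINE_MASK (CACHELINE_BYTES - 1)
    #define IS_ALIGNED(x, a) (((x) & ((a) - 1)) == 0)

    int main(void)
    {
            uintptr_t buf = 0x1030;  /* unaligned on its own */
            uintptr_t offset = 0x10; /* unaligned on its own */

            /* old test: either component unaligned forces the bounce buffer */
            int old_bounce = ((buf | offset) & CACHELINE_MASK) != 0;

            /* new test: only the effective address (and length) matters */
            int new_bounce = !IS_ALIGNED(buf + offset, CACHELINE_BYTES);

            printf("effective %#lx: old=%d new=%d\n",
                   (unsigned long)(buf + offset), old_bounce, new_bounce);
            return 0;
    }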

drivers/gpu/drm/xe/xe_ring_ops.c

@@ -110,13 +110,14 @@ static int emit_bb_start(u64 batch_addr, u32 ppgtt_flag, u32 *dw, int i)
 	return i;
 }
 
-static int emit_flush_invalidate(u32 *dw, int i)
+static int emit_flush_invalidate(u32 addr, u32 val, u32 *dw, int i)
 {
 	dw[i++] = MI_FLUSH_DW | MI_INVALIDATE_TLB | MI_FLUSH_DW_OP_STOREDW |
-		  MI_FLUSH_IMM_DW | MI_FLUSH_DW_STORE_INDEX;
-	dw[i++] = LRC_PPHWSP_FLUSH_INVAL_SCRATCH_ADDR;
-	dw[i++] = 0;
+		  MI_FLUSH_IMM_DW;
+	dw[i++] = addr | MI_FLUSH_DW_USE_GTT;
 	dw[i++] = 0;
+	dw[i++] = val;
 
 	return i;
 }
@@ -397,23 +398,20 @@ static void __emit_job_gen12_render_compute(struct xe_sched_job *job,
 static void emit_migration_job_gen12(struct xe_sched_job *job,
 				     struct xe_lrc *lrc, u32 seqno)
 {
+	u32 saddr = xe_lrc_start_seqno_ggtt_addr(lrc);
 	u32 dw[MAX_JOB_SIZE_DW], i = 0;
 
 	i = emit_copy_timestamp(lrc, dw, i);
 
-	i = emit_store_imm_ggtt(xe_lrc_start_seqno_ggtt_addr(lrc),
-				seqno, dw, i);
+	i = emit_store_imm_ggtt(saddr, seqno, dw, i);
 
 	dw[i++] = MI_ARB_ON_OFF | MI_ARB_DISABLE; /* Enabled again below */
 
 	i = emit_bb_start(job->ptrs[0].batch_addr, BIT(8), dw, i);
 
-	if (!IS_SRIOV_VF(gt_to_xe(job->q->gt))) {
-		/* XXX: Do we need this? Leaving for now. */
-		dw[i++] = preparser_disable(true);
-		i = emit_flush_invalidate(dw, i);
-		dw[i++] = preparser_disable(false);
-	}
+	dw[i++] = preparser_disable(true);
+	i = emit_flush_invalidate(saddr, seqno, dw, i);
+	dw[i++] = preparser_disable(false);
 
 	i = emit_bb_start(job->ptrs[1].batch_addr, BIT(8), dw, i);