Mirror of git://git.yoctoproject.org/linux-yocto.git (synced 2025-07-06 05:45:29 +02:00)
drm fixes for 6.12-rc2
Merge tag 'drm-fixes-2024-10-04' of https://gitlab.freedesktop.org/drm/kernel

Pull drm fixes from Dave Airlie:
 "Weekly fixes, xe and amdgpu lead the way, with panthor and a few core
  components getting various fixes. Nothing seems too out of the
  ordinary.

  atomic:
   - Use correct type when reading damage rectangles

  display:
   - Fix kernel docs

  dp-mst:
   - Fix DSC decompression detection

  hdmi:
   - Fix infoframe size

  sched:
   - Update maintainers
   - Fix race condition when queueing up jobs
   - Fix locking in drm_sched_entity_modify_sched()
   - Fix pointer deref if entity queue changes

  sysfb:
   - Disable sysfb if framebuffer parent device is unknown

  amdgpu:
   - DML2 fix
   - DSC fix
   - Dispclk fix
   - eDP HDR fix
   - IPS fix
   - TBT fix

  i915:
   - One fix for bitwise and logical "and" mixup in PM code

  xe:
   - Restore pci state on resume
   - Fix locking on submission, queue and vm
   - Fix UAF on queue destruction
   - Fix resource release on freq init error path
   - Use rw_semaphore to reduce contention on ASID->VM lookup
   - Fix steering for media on Xe2_HPM
   - Tuning updates to Xe2
   - Resume TDR after GT reset to prevent jobs running forever
   - Move id allocation to avoid userspace using a guessed number to
     trigger UAF
   - Fix OA stream close preventing pbatch buffers to complete
   - Fix NPD when migrating memory on LNL
   - Fix memory leak when aborting binds

  panthor:
   - Fix locking
   - Set FOP_UNSIGNED_OFFSET in fops instance
   - Acquire lock in panthor_vm_prepare_map_op_ctx()
   - Avoid uninitialized variable in tick_ctx_cleanup()
   - Do not block scheduler queue if work is pending
   - Do not add write fences to the shared BOs

  vbox:
   - Fix VLA handling"

* tag 'drm-fixes-2024-10-04' of https://gitlab.freedesktop.org/drm/kernel: (41 commits)
  drm/xe: Fix memory leak when aborting binds
  drm/xe: Prevent null pointer access in xe_migrate_copy
  drm/xe/oa: Don't reset OAC_CONTEXT_ENABLE on OA stream close
  drm/xe/queue: move xa_alloc to prevent UAF
  drm/xe/vm: move xa_alloc to prevent UAF
  drm/xe: Clean up VM / exec queue file lock usage.
  drm/xe: Resume TDR after GT reset
  drm/xe/xe2: Add performance tuning for L3 cache flushing
  drm/xe/xe2: Extend performance tuning to media GT
  drm/xe/mcr: Use Xe2_LPM steering tables for Xe2_HPM
  drm/xe: Use helper for ASID -> VM in GPU faults and access counters
  drm/xe: Convert to USM lock to rwsem
  drm/xe: use devm_add_action_or_reset() helper
  drm/xe: fix UAF around queue destruction
  drm/xe/guc_submit: add missing locking in wedged_fini
  drm/xe: Restore pci state upon resume
  drm/amd/display: Fix system hang while resume with TBT monitor
  drm/amd/display: Enable idle workqueue for more IPS modes
  drm/amd/display: Add HDR workaround for specific eDP
  drm/amd/display: avoid set dispclk to 0
  ...
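Two of the xe entries above ("move xa_alloc to prevent UAF" for both exec queues and VMs) share one idea, visible in the xe_vm_create_ioctl and xe_exec_queue_create_ioctl hunks further down: the id that userspace can see is allocated only as the very last step of the ioctl, so a guessed id cannot be used to destroy an object that is still being set up. A minimal sketch of that ordering, with hypothetical names (my_obj, my_create_ioctl) rather than the driver's own:

/* Hedged sketch of the "allocate the user-visible id last" pattern.
 * my_obj and my_create_ioctl are illustrative names, not from the tree;
 * xa_alloc() and xa_limit_32b are the real xarray API used in the hunks.
 */
#include <linux/xarray.h>
#include <linux/gfp.h>

struct my_obj { /* fully initialised before publication */ };

static int my_create_ioctl(struct xarray *user_ids, struct my_obj *obj, u32 *out_id)
{
	u32 id;
	int err;

	/* ... finish all initialisation and take all needed references first ... */

	/*
	 * Only then publish the object: once xa_alloc() returns, userspace
	 * can pass the id to a destroy ioctl, so nothing after this point
	 * may touch obj without holding its own reference.
	 */
	err = xa_alloc(user_ids, &id, obj, xa_limit_32b, GFP_KERNEL);
	if (err)
		return err;

	*out_id = id;
	return 0;
}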
This commit is contained in: commit fe6fceceae
@@ -181,7 +181,7 @@ Bridge Operations
 Bridge Connector Helper
 -----------------------

-.. kernel-doc:: drivers/gpu/drm/drm_bridge_connector.c
+.. kernel-doc:: drivers/gpu/drm/display/drm_bridge_connector.c
    :doc: overview


@@ -204,7 +204,7 @@ MIPI-DSI bridge operation
 Bridge Connector Helper Reference
 ---------------------------------

-.. kernel-doc:: drivers/gpu/drm/drm_bridge_connector.c
+.. kernel-doc:: drivers/gpu/drm/display/drm_bridge_connector.c
    :export:

 Panel-Bridge Helper Reference
@@ -7832,6 +7832,8 @@ F:	drivers/gpu/drm/xlnx/
 DRM GPU SCHEDULER
 M:	Luben Tuikov <ltuikov89@gmail.com>
 M:	Matthew Brost <matthew.brost@intel.com>
+M:	Danilo Krummrich <dakr@kernel.org>
+M:	Philipp Stanner <pstanner@redhat.com>
 L:	dri-devel@lists.freedesktop.org
 S:	Maintained
 T:	git https://gitlab.freedesktop.org/drm/misc/kernel.git
@@ -67,9 +67,11 @@ static bool sysfb_unregister(void)
 void sysfb_disable(struct device *dev)
 {
 	struct screen_info *si = &screen_info;
+	struct device *parent;

 	mutex_lock(&disable_lock);
-	if (!dev || dev == sysfb_parent_dev(si)) {
+	parent = sysfb_parent_dev(si);
+	if (!dev || !parent || dev == parent) {
 		sysfb_unregister();
 		disabled = true;
 	}
@@ -770,6 +770,12 @@ static void dmub_hpd_callback(struct amdgpu_device *adev,
 		return;
 	}

+	/* Skip DMUB HPD IRQ in suspend/resume. We will probe them later. */
+	if (notify->type == DMUB_NOTIFICATION_HPD && adev->in_suspend) {
+		DRM_INFO("Skip DMUB HPD IRQ callback in suspend/resume\n");
+		return;
+	}
+
 	link_index = notify->link_index;
 	link = adev->dm.dc->links[link_index];
 	dev = adev->dm.ddev;
@@ -2026,7 +2032,8 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
 			DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n");
 	}

-	if (adev->dm.dc->caps.ips_support && adev->dm.dc->config.disable_ips == DMUB_IPS_ENABLE)
+	if (adev->dm.dc->caps.ips_support &&
+	    adev->dm.dc->config.disable_ips != DMUB_IPS_DISABLE_ALL)
 		adev->dm.idle_workqueue = idle_create_workqueue(adev);

 	if (adev->dm.dc->caps.max_links > 0 && adev->family >= AMDGPU_FAMILY_RV) {
@@ -6735,12 +6742,21 @@ create_stream_for_sink(struct drm_connector *connector,
 	if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT ||
 	    stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST ||
 	    stream->signal == SIGNAL_TYPE_EDP) {
+		const struct dc_edid_caps *edid_caps;
+		unsigned int disable_colorimetry = 0;
+
+		if (aconnector->dc_sink) {
+			edid_caps = &aconnector->dc_sink->edid_caps;
+			disable_colorimetry = edid_caps->panel_patch.disable_colorimetry;
+		}
+
 		//
 		// should decide stream support vsc sdp colorimetry capability
 		// before building vsc info packet
 		//
 		stream->use_vsc_sdp_for_colorimetry = stream->link->dpcd_caps.dpcd_rev.raw >= 0x14 &&
-			stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED;
+			stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED &&
+			!disable_colorimetry;

 		if (stream->out_transfer_func.tf == TRANSFER_FUNCTION_GAMMA22)
 			tf = TRANSFER_FUNC_GAMMA_22;
@@ -73,6 +73,10 @@ static void apply_edid_quirks(struct edid *edid, struct dc_edid_caps *edid_caps)
 		DRM_DEBUG_DRIVER("Clearing DPCD 0x317 on monitor with panel id %X\n", panel_id);
 		edid_caps->panel_patch.remove_sink_ext_caps = true;
 		break;
+	case drm_edid_encode_panel_id('S', 'D', 'C', 0x4154):
+		DRM_DEBUG_DRIVER("Disabling VSC on monitor with panel id %X\n", panel_id);
+		edid_caps->panel_patch.disable_colorimetry = true;
+		break;
 	default:
 		return;
 	}
@@ -1027,6 +1027,7 @@ static int try_disable_dsc(struct drm_atomic_state *state,
 	int remaining_to_try = 0;
 	int ret;
 	uint16_t fec_overhead_multiplier_x1000 = get_fec_overhead_multiplier(dc_link);
+	int var_pbn;

 	for (i = 0; i < count; i++) {
 		if (vars[i + k].dsc_enabled
@@ -1057,13 +1058,18 @@ static int try_disable_dsc(struct drm_atomic_state *state,
 			break;

 		DRM_DEBUG_DRIVER("MST_DSC index #%d, try no compression\n", next_index);
+		var_pbn = vars[next_index].pbn;
 		vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.stream_kbps, fec_overhead_multiplier_x1000);
 		ret = drm_dp_atomic_find_time_slots(state,
 						    params[next_index].port->mgr,
 						    params[next_index].port,
 						    vars[next_index].pbn);
-		if (ret < 0)
+		if (ret < 0) {
+			DRM_DEBUG_DRIVER("%s:%d MST_DSC index #%d, failed to set pbn to the state, %d\n",
+					 __func__, __LINE__, next_index, ret);
+			vars[next_index].pbn = var_pbn;
 			return ret;
+		}

 		ret = drm_dp_mst_atomic_check(state);
 		if (ret == 0) {
@@ -1071,15 +1077,18 @@ static int try_disable_dsc(struct drm_atomic_state *state,
 			vars[next_index].dsc_enabled = false;
 			vars[next_index].bpp_x16 = 0;
 		} else {
-			DRM_DEBUG_DRIVER("MST_DSC index #%d, restore minimum compression\n", next_index);
-			vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.max_kbps, fec_overhead_multiplier_x1000);
+			DRM_DEBUG_DRIVER("MST_DSC index #%d, restore optimized pbn value\n", next_index);
+			vars[next_index].pbn = var_pbn;
 			ret = drm_dp_atomic_find_time_slots(state,
 							    params[next_index].port->mgr,
 							    params[next_index].port,
 							    vars[next_index].pbn);
-			if (ret < 0)
+			if (ret < 0) {
+				DRM_DEBUG_DRIVER("%s:%d MST_DSC index #%d, failed to set pbn to the state, %d\n",
+						 __func__, __LINE__, next_index, ret);
 				return ret;
+			}
 		}

 		tried[next_index] = true;
 		remaining_to_try--;
@@ -178,6 +178,7 @@ struct dc_panel_patch {
 	unsigned int skip_avmute;
 	unsigned int mst_start_top_delay;
 	unsigned int remove_sink_ext_caps;
+	unsigned int disable_colorimetry;
 };

 struct dc_edid_caps {
@@ -303,7 +303,6 @@ void build_unoptimized_policy_settings(enum dml_project_id project, struct dml_m
 	if (project == dml_project_dcn35 ||
 	    project == dml_project_dcn351) {
 		policy->DCCProgrammingAssumesScanDirectionUnknownFinal = false;
-		policy->EnhancedPrefetchScheduleAccelerationFinal = 0;
 		policy->AllowForPStateChangeOrStutterInVBlankFinal = dml_prefetch_support_uclk_fclk_and_stutter_if_possible; /*new*/
 		policy->UseOnlyMaxPrefetchModes = 1;
 	}
@@ -766,6 +766,7 @@ static const struct dc_debug_options debug_defaults_drv = {
 	.disable_dmub_reallow_idle = false,
 	.static_screen_wait_frames = 2,
 	.notify_dpia_hr_bw = true,
+	.min_disp_clk_khz = 50000,
 };

 static const struct dc_panel_config panel_config_defaults = {
@@ -6083,6 +6083,7 @@ struct drm_dp_aux *drm_dp_mst_dsc_aux_for_port(struct drm_dp_mst_port *port)
 	struct drm_dp_aux *immediate_upstream_aux;
 	struct drm_dp_mst_port *fec_port;
 	struct drm_dp_desc desc = {};
+	u8 upstream_dsc;
 	u8 endpoint_fec;
 	u8 endpoint_dsc;

@@ -6109,8 +6110,6 @@ struct drm_dp_aux *drm_dp_mst_dsc_aux_for_port(struct drm_dp_mst_port *port)

 	/* DP-to-DP peer device */
 	if (drm_dp_mst_is_virtual_dpcd(immediate_upstream_port)) {
-		u8 upstream_dsc;
-
 		if (drm_dp_dpcd_read(&port->aux,
 				     DP_DSC_SUPPORT, &endpoint_dsc, 1) != 1)
 			return NULL;
@@ -6156,6 +6155,13 @@ struct drm_dp_aux *drm_dp_mst_dsc_aux_for_port(struct drm_dp_mst_port *port)
 	if (drm_dp_has_quirk(&desc, DP_DPCD_QUIRK_DSC_WITHOUT_VIRTUAL_DPCD)) {
 		u8 dpcd_ext[DP_RECEIVER_CAP_SIZE];

+		if (drm_dp_dpcd_read(immediate_upstream_aux,
+				     DP_DSC_SUPPORT, &upstream_dsc, 1) != 1)
+			return NULL;
+
+		if (!(upstream_dsc & DP_DSC_DECOMPRESSION_IS_SUPPORTED))
+			return NULL;
+
 		if (drm_dp_read_dpcd_caps(immediate_upstream_aux, dpcd_ext) < 0)
 			return NULL;

@@ -521,8 +521,6 @@ int drm_atomic_helper_connector_hdmi_check(struct drm_connector *connector,
 }
 EXPORT_SYMBOL(drm_atomic_helper_connector_hdmi_check);

-#define HDMI_MAX_INFOFRAME_SIZE 29
-
 static int clear_device_infoframe(struct drm_connector *connector,
 				  enum hdmi_infoframe_type type)
 {
@@ -563,7 +561,7 @@ static int write_device_infoframe(struct drm_connector *connector,
 {
 	const struct drm_connector_hdmi_funcs *funcs = connector->hdmi.funcs;
 	struct drm_device *dev = connector->dev;
-	u8 buffer[HDMI_MAX_INFOFRAME_SIZE];
+	u8 buffer[HDMI_INFOFRAME_SIZE(MAX)];
 	int ret;
 	int len;

@@ -543,7 +543,7 @@ static int drm_atomic_plane_set_property(struct drm_plane *plane,
 					&state->fb_damage_clips,
 					val,
 					-1,
-					sizeof(struct drm_rect),
+					sizeof(struct drm_mode_rect),
 					&replaced);
 		return ret;
 	} else if (property == plane->scaling_filter_property) {
@@ -520,8 +520,6 @@ static const struct file_operations drm_connector_fops = {
 	.write = connector_write
 };

-#define HDMI_MAX_INFOFRAME_SIZE 29
-
 static ssize_t
 audio_infoframe_read(struct file *filp, char __user *ubuf, size_t count, loff_t *ppos)
 {
@@ -579,7 +577,7 @@ static ssize_t _f##_read_infoframe(struct file *filp, \
 	struct drm_connector *connector; \
 	union hdmi_infoframe *frame; \
 	struct drm_device *dev; \
-	u8 buf[HDMI_MAX_INFOFRAME_SIZE]; \
+	u8 buf[HDMI_INFOFRAME_SIZE(MAX)]; \
 	ssize_t len = 0; \
 	\
 	connector = filp->private_data; \
@@ -1131,7 +1131,7 @@ static vm_fault_t vm_fault_ttm(struct vm_fault *vmf)
 			GEM_WARN_ON(!i915_ttm_cpu_maps_iomem(bo->resource));
 	}

-	if (wakeref & CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND)
+	if (wakeref && CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND != 0)
 		intel_wakeref_auto(&to_i915(obj->base.dev)->runtime_pm.userfault_wakeref,
 				   msecs_to_jiffies_timeout(CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND));

@@ -1383,6 +1383,7 @@ static const struct file_operations panthor_drm_driver_fops = {
 	.read = drm_read,
 	.llseek = noop_llseek,
 	.mmap = panthor_mmap,
+	.fop_flags = FOP_UNSIGNED_OFFSET,
 };

 #ifdef CONFIG_DEBUG_FS
@@ -1251,9 +1251,17 @@ static int panthor_vm_prepare_map_op_ctx(struct panthor_vm_op_ctx *op_ctx,
 			goto err_cleanup;
 	}

+	/* drm_gpuvm_bo_obtain_prealloc() will call drm_gpuvm_bo_put() on our
+	 * pre-allocated BO if the <BO,VM> association exists. Given we
+	 * only have one ref on preallocated_vm_bo, drm_gpuvm_bo_destroy() will
+	 * be called immediately, and we have to hold the VM resv lock when
+	 * calling this function.
+	 */
+	dma_resv_lock(panthor_vm_resv(vm), NULL);
 	mutex_lock(&bo->gpuva_list_lock);
 	op_ctx->map.vm_bo = drm_gpuvm_bo_obtain_prealloc(preallocated_vm_bo);
 	mutex_unlock(&bo->gpuva_list_lock);
+	dma_resv_unlock(panthor_vm_resv(vm));

 	/* If the a vm_bo for this <VM,BO> combination exists, it already
 	 * retains a pin ref, and we can release the one we took earlier.
@@ -1103,7 +1103,13 @@ cs_slot_sync_queue_state_locked(struct panthor_device *ptdev, u32 csg_id, u32 cs
 		list_move_tail(&group->wait_node,
 			       &group->ptdev->scheduler->groups.waiting);
 	}
+
+	/* The queue is only blocked if there's no deferred operation
+	 * pending, which can be checked through the scoreboard status.
+	 */
+	if (!cs_iface->output->status_scoreboards)
 		group->blocked_queues |= BIT(cs_id);
+
 	queue->syncwait.gpu_va = cs_iface->output->status_wait_sync_ptr;
 	queue->syncwait.ref = cs_iface->output->status_wait_sync_value;
 	status_wait_cond = cs_iface->output->status_wait & CS_STATUS_WAIT_SYNC_COND_MASK;
@@ -2046,6 +2052,7 @@ static void
 tick_ctx_cleanup(struct panthor_scheduler *sched,
 		 struct panthor_sched_tick_ctx *ctx)
 {
+	struct panthor_device *ptdev = sched->ptdev;
 	struct panthor_group *group, *tmp;
 	u32 i;

@@ -2054,7 +2061,7 @@ tick_ctx_cleanup(struct panthor_scheduler *sched,
 		/* If everything went fine, we should only have groups
 		 * to be terminated in the old_groups lists.
 		 */
-		drm_WARN_ON(&group->ptdev->base, !ctx->csg_upd_failed_mask &&
+		drm_WARN_ON(&ptdev->base, !ctx->csg_upd_failed_mask &&
 			    group_can_run(group));

 		if (!group_can_run(group)) {
@@ -2077,7 +2084,7 @@ tick_ctx_cleanup(struct panthor_scheduler *sched,
 		/* If everything went fine, the groups to schedule lists should
 		 * be empty.
 		 */
-		drm_WARN_ON(&group->ptdev->base,
+		drm_WARN_ON(&ptdev->base,
 			    !ctx->csg_upd_failed_mask && !list_empty(&ctx->groups[i]));

 		list_for_each_entry_safe(group, tmp, &ctx->groups[i], run_node) {
@@ -3242,6 +3249,18 @@ int panthor_group_destroy(struct panthor_file *pfile, u32 group_handle)
 	return 0;
 }

+static struct panthor_group *group_from_handle(struct panthor_group_pool *pool,
+					       u32 group_handle)
+{
+	struct panthor_group *group;
+
+	xa_lock(&pool->xa);
+	group = group_get(xa_load(&pool->xa, group_handle));
+	xa_unlock(&pool->xa);
+
+	return group;
+}
+
 int panthor_group_get_state(struct panthor_file *pfile,
 			    struct drm_panthor_group_get_state *get_state)
 {
@@ -3253,7 +3272,7 @@ int panthor_group_get_state(struct panthor_file *pfile,
 	if (get_state->pad)
 		return -EINVAL;

-	group = group_get(xa_load(&gpool->xa, get_state->group_handle));
+	group = group_from_handle(gpool, get_state->group_handle);
 	if (!group)
 		return -EINVAL;

@@ -3384,7 +3403,7 @@ panthor_job_create(struct panthor_file *pfile,
 	job->call_info.latest_flush = qsubmit->latest_flush;
 	INIT_LIST_HEAD(&job->node);

-	job->group = group_get(xa_load(&gpool->xa, group_handle));
+	job->group = group_from_handle(gpool, group_handle);
 	if (!job->group) {
 		ret = -EINVAL;
 		goto err_put_job;
@@ -3424,13 +3443,8 @@ void panthor_job_update_resvs(struct drm_exec *exec, struct drm_sched_job *sched
 {
 	struct panthor_job *job = container_of(sched_job, struct panthor_job, base);

-	/* Still not sure why we want USAGE_WRITE for external objects, since I
-	 * was assuming this would be handled through explicit syncs being imported
-	 * to external BOs with DMA_BUF_IOCTL_IMPORT_SYNC_FILE, but other drivers
-	 * seem to pass DMA_RESV_USAGE_WRITE, so there must be a good reason.
-	 */
 	panthor_vm_update_resvs(job->group->vm, exec, &sched_job->s_fence->finished,
-				DMA_RESV_USAGE_BOOKKEEP, DMA_RESV_USAGE_WRITE);
+				DMA_RESV_USAGE_BOOKKEEP, DMA_RESV_USAGE_BOOKKEEP);
 }

 void panthor_sched_unplug(struct panthor_device *ptdev)
@@ -133,8 +133,10 @@ void drm_sched_entity_modify_sched(struct drm_sched_entity *entity,
 {
 	WARN_ON(!num_sched_list || !sched_list);

+	spin_lock(&entity->rq_lock);
 	entity->sched_list = sched_list;
 	entity->num_sched_list = num_sched_list;
+	spin_unlock(&entity->rq_lock);
 }
 EXPORT_SYMBOL(drm_sched_entity_modify_sched);

@@ -380,7 +382,7 @@ static void drm_sched_entity_wakeup(struct dma_fence *f,
 		container_of(cb, struct drm_sched_entity, cb);

 	drm_sched_entity_clear_dep(f, cb);
-	drm_sched_wakeup(entity->rq->sched, entity);
+	drm_sched_wakeup(entity->rq->sched);
 }

 /**
@@ -597,6 +599,9 @@ void drm_sched_entity_push_job(struct drm_sched_job *sched_job)

 	/* first job wakes up scheduler */
 	if (first) {
+		struct drm_gpu_scheduler *sched;
+		struct drm_sched_rq *rq;
+
 		/* Add the entity to the run queue */
 		spin_lock(&entity->rq_lock);
 		if (entity->stopped) {
@@ -606,13 +611,16 @@ void drm_sched_entity_push_job(struct drm_sched_job *sched_job)
 			return;
 		}

-		drm_sched_rq_add_entity(entity->rq, entity);
+		rq = entity->rq;
+		sched = rq->sched;
+
+		drm_sched_rq_add_entity(rq, entity);
 		spin_unlock(&entity->rq_lock);

 		if (drm_sched_policy == DRM_SCHED_POLICY_FIFO)
 			drm_sched_rq_update_fifo(entity, submit_ts);

-		drm_sched_wakeup(entity->rq->sched, entity);
+		drm_sched_wakeup(sched);
 	}
 }
 EXPORT_SYMBOL(drm_sched_entity_push_job);
@@ -1013,14 +1013,11 @@ EXPORT_SYMBOL(drm_sched_job_cleanup);
 /**
  * drm_sched_wakeup - Wake up the scheduler if it is ready to queue
  * @sched: scheduler instance
- * @entity: the scheduler entity
  *
  * Wake up the scheduler if we can queue jobs.
  */
-void drm_sched_wakeup(struct drm_gpu_scheduler *sched,
-		      struct drm_sched_entity *entity)
+void drm_sched_wakeup(struct drm_gpu_scheduler *sched)
 {
-	if (drm_sched_can_queue(sched, entity))
 		drm_sched_run_job_queue(sched);
 }

@@ -139,7 +139,15 @@ int hgsmi_update_pointer_shape(struct gen_pool *ctx, u32 flags,
 		flags |= VBOX_MOUSE_POINTER_VISIBLE;
 	}

-	p = hgsmi_buffer_alloc(ctx, sizeof(*p) + pixel_len, HGSMI_CH_VBVA,
+	/*
+	 * The 4 extra bytes come from switching struct vbva_mouse_pointer_shape
+	 * from having a 4 bytes fixed array at the end to using a proper VLA
+	 * at the end. These 4 extra bytes were not subtracted from sizeof(*p)
+	 * before the switch to the VLA, so this way the behavior is unchanged.
+	 * Chances are these 4 extra bytes are not necessary but they are kept
+	 * to avoid regressions.
+	 */
+	p = hgsmi_buffer_alloc(ctx, sizeof(*p) + pixel_len + 4, HGSMI_CH_VBVA,
 			       VBVA_MOUSE_POINTER_SHAPE);
 	if (!p)
 		return -ENOMEM;
@@ -351,10 +351,8 @@ struct vbva_mouse_pointer_shape {
 	 * Bytes in the gap between the AND and the XOR mask are undefined.
 	 * XOR mask scanlines have no gap between them and size of XOR mask is:
 	 * xor_len = width * 4 * height.
-	 *
-	 * Preallocate 4 bytes for accessing actual data as p->data.
 	 */
-	u8 data[4];
+	u8 data[];
 } __packed;

 /* pointer is visible */
@@ -169,6 +169,8 @@
 #define XEHP_SLICE_COMMON_ECO_CHICKEN1 XE_REG_MCR(0x731c, XE_REG_OPTION_MASKED)
 #define MSC_MSAA_REODER_BUF_BYPASS_DISABLE REG_BIT(14)

+#define XE2LPM_CCCHKNREG1 XE_REG(0x82a8)
+
 #define VF_PREEMPTION XE_REG(0x83a4, XE_REG_OPTION_MASKED)
 #define PREEMPTION_VERTEX_COUNT REG_GENMASK(15, 0)

@@ -378,6 +380,9 @@
 #define L3SQCREG3 XE_REG_MCR(0xb108)
 #define COMPPWOVERFETCHEN REG_BIT(28)

+#define SCRATCH3_LBCF XE_REG_MCR(0xb154)
+#define RWFLUSHALLEN REG_BIT(17)
+
 #define XEHP_L3SQCREG5 XE_REG_MCR(0xb158)
 #define L3_PWM_TIMER_INIT_VAL_MASK REG_GENMASK(9, 0)

@@ -391,6 +396,12 @@
 #define SCRATCH1LPFC XE_REG(0xb474)
 #define EN_L3_RW_CCS_CACHE_FLUSH REG_BIT(0)

+#define XE2LPM_L3SQCREG2 XE_REG_MCR(0xb604)
+
+#define XE2LPM_L3SQCREG3 XE_REG_MCR(0xb608)
+
+#define XE2LPM_SCRATCH3_LBCF XE_REG_MCR(0xb654)
+
 #define XE2LPM_L3SQCREG5 XE_REG_MCR(0xb658)

 #define XE2_TDF_CTRL XE_REG(0xb418)
@@ -680,8 +680,8 @@ static int xe_bo_move(struct ttm_buffer_object *ttm_bo, bool evict,
 	tt_has_data = ttm && (ttm_tt_is_populated(ttm) ||
 			      (ttm->page_flags & TTM_TT_FLAG_SWAPPED));

-	move_lacks_source = handle_system_ccs ? (!bo->ccs_cleared) :
-		(!mem_type_is_vram(old_mem_type) && !tt_has_data);
+	move_lacks_source = !old_mem || (handle_system_ccs ? (!bo->ccs_cleared) :
+		(!mem_type_is_vram(old_mem_type) && !tt_has_data));

 	needs_clear = (ttm && ttm->page_flags & TTM_TT_FLAG_ZERO_ALLOC) ||
 		      (!ttm && ttm_bo->type == ttm_bo_type_device);
@@ -171,10 +171,8 @@ static void xe_file_close(struct drm_device *dev, struct drm_file *file)
 		xe_exec_queue_kill(q);
 		xe_exec_queue_put(q);
 	}
-	mutex_lock(&xef->vm.lock);
 	xa_for_each(&xef->vm.xa, idx, vm)
 		xe_vm_close_and_put(vm);
-	mutex_unlock(&xef->vm.lock);

 	xe_file_put(xef);

@@ -298,6 +296,9 @@ static void xe_device_destroy(struct drm_device *dev, void *dummy)
 	if (xe->unordered_wq)
 		destroy_workqueue(xe->unordered_wq);

+	if (xe->destroy_wq)
+		destroy_workqueue(xe->destroy_wq);
+
 	ttm_device_fini(&xe->ttm);
 }

@@ -336,9 +337,7 @@ struct xe_device *xe_device_create(struct pci_dev *pdev,

 	init_waitqueue_head(&xe->ufence_wq);

-	err = drmm_mutex_init(&xe->drm, &xe->usm.lock);
-	if (err)
-		goto err;
+	init_rwsem(&xe->usm.lock);

 	xa_init_flags(&xe->usm.asid_to_vm, XA_FLAGS_ALLOC);

@@ -363,8 +362,9 @@ struct xe_device *xe_device_create(struct pci_dev *pdev,
 	xe->preempt_fence_wq = alloc_ordered_workqueue("xe-preempt-fence-wq", 0);
 	xe->ordered_wq = alloc_ordered_workqueue("xe-ordered-wq", 0);
 	xe->unordered_wq = alloc_workqueue("xe-unordered-wq", 0, 0);
+	xe->destroy_wq = alloc_workqueue("xe-destroy-wq", 0, 0);
 	if (!xe->ordered_wq || !xe->unordered_wq ||
-	    !xe->preempt_fence_wq) {
+	    !xe->preempt_fence_wq || !xe->destroy_wq) {
 		/*
 		 * Cleanup done in xe_device_destroy via
 		 * drmm_add_action_or_reset register above
@@ -369,7 +369,7 @@ struct xe_device {
 		/** @usm.next_asid: next ASID, used to cyclical alloc asids */
 		u32 next_asid;
 		/** @usm.lock: protects UM state */
-		struct mutex lock;
+		struct rw_semaphore lock;
 	} usm;

 	/** @pinned: pinned BO state */
@@ -396,6 +396,9 @@ struct xe_device {
 	/** @unordered_wq: used to serialize unordered work, mostly display */
 	struct workqueue_struct *unordered_wq;

+	/** @destroy_wq: used to serialize user destroy work, like queue */
+	struct workqueue_struct *destroy_wq;
+
 	/** @tiles: device tiles */
 	struct xe_tile tiles[XE_MAX_TILES_PER_DEVICE];

@@ -567,15 +570,23 @@ struct xe_file {
 	struct {
 		/** @vm.xe: xarray to store VMs */
 		struct xarray xa;
-		/** @vm.lock: protects file VM state */
+		/**
+		 * @vm.lock: Protects VM lookup + reference and removal a from
+		 * file xarray. Not an intended to be an outer lock which does
+		 * thing while being held.
+		 */
 		struct mutex lock;
 	} vm;

 	/** @exec_queue: Submission exec queue state for file */
 	struct {
-		/** @exec_queue.xe: xarray to store engines */
+		/** @exec_queue.xa: xarray to store exece queues */
 		struct xarray xa;
-		/** @exec_queue.lock: protects file engine state */
+		/**
+		 * @exec_queue.lock: Protects exec queue lookup + reference and
+		 * removal a from file xarray. Not an intended to be an outer
+		 * lock which does thing while being held.
+		 */
 		struct mutex lock;
 	} exec_queue;

@@ -283,8 +283,15 @@ static void show_run_ticks(struct drm_printer *p, struct drm_file *file)

 	/* Accumulate all the exec queues from this client */
 	mutex_lock(&xef->exec_queue.lock);
-	xa_for_each(&xef->exec_queue.xa, i, q)
+	xa_for_each(&xef->exec_queue.xa, i, q) {
+		xe_exec_queue_get(q);
+		mutex_unlock(&xef->exec_queue.lock);
+
 		xe_exec_queue_update_run_ticks(q);
+
+		mutex_lock(&xef->exec_queue.lock);
+		xe_exec_queue_put(q);
+	}
 	mutex_unlock(&xef->exec_queue.lock);

 	/* Get the total GPU cycles */
@@ -635,14 +635,14 @@ int xe_exec_queue_create_ioctl(struct drm_device *dev, void *data,
 		}
 	}

-	mutex_lock(&xef->exec_queue.lock);
+	q->xef = xe_file_get(xef);
+
+	/* user id alloc must always be last in ioctl to prevent UAF */
 	err = xa_alloc(&xef->exec_queue.xa, &id, q, xa_limit_32b, GFP_KERNEL);
-	mutex_unlock(&xef->exec_queue.lock);
 	if (err)
 		goto kill_exec_queue;

 	args->exec_queue_id = id;
-	q->xef = xe_file_get(xef);

 	return 0;

@@ -90,6 +90,11 @@ void xe_sched_submission_stop(struct xe_gpu_scheduler *sched)
 	cancel_work_sync(&sched->work_process_msg);
 }

+void xe_sched_submission_resume_tdr(struct xe_gpu_scheduler *sched)
+{
+	drm_sched_resume_timeout(&sched->base, sched->base.timeout);
+}
+
 void xe_sched_add_msg(struct xe_gpu_scheduler *sched,
 		      struct xe_sched_msg *msg)
 {
@@ -22,6 +22,8 @@ void xe_sched_fini(struct xe_gpu_scheduler *sched);
 void xe_sched_submission_start(struct xe_gpu_scheduler *sched);
 void xe_sched_submission_stop(struct xe_gpu_scheduler *sched);

+void xe_sched_submission_resume_tdr(struct xe_gpu_scheduler *sched);
+
 void xe_sched_add_msg(struct xe_gpu_scheduler *sched,
 		      struct xe_sched_msg *msg);
 void xe_sched_add_msg_locked(struct xe_gpu_scheduler *sched,
@@ -237,11 +237,11 @@ int xe_gt_freq_init(struct xe_gt *gt)
 	if (!gt->freq)
 		return -ENOMEM;

-	err = devm_add_action(xe->drm.dev, freq_fini, gt->freq);
+	err = sysfs_create_files(gt->freq, freq_attrs);
 	if (err)
 		return err;

-	err = sysfs_create_files(gt->freq, freq_attrs);
+	err = devm_add_action_or_reset(xe->drm.dev, freq_fini, gt->freq);
 	if (err)
 		return err;

@@ -439,7 +439,7 @@ void xe_gt_mcr_init(struct xe_gt *gt)
 	if (gt->info.type == XE_GT_TYPE_MEDIA) {
 		drm_WARN_ON(&xe->drm, MEDIA_VER(xe) < 13);

-		if (MEDIA_VER(xe) >= 20) {
+		if (MEDIA_VERx100(xe) >= 1301) {
 			gt->steering[OADDRM].ranges = xe2lpm_gpmxmt_steering_table;
 			gt->steering[INSTANCE0].ranges = xe2lpm_instance0_steering_table;
 		} else {
@@ -185,6 +185,21 @@ unlock_dma_resv:
 	return err;
 }

+static struct xe_vm *asid_to_vm(struct xe_device *xe, u32 asid)
+{
+	struct xe_vm *vm;
+
+	down_read(&xe->usm.lock);
+	vm = xa_load(&xe->usm.asid_to_vm, asid);
+	if (vm && xe_vm_in_fault_mode(vm))
+		xe_vm_get(vm);
+	else
+		vm = ERR_PTR(-EINVAL);
+	up_read(&xe->usm.lock);
+
+	return vm;
+}
+
 static int handle_pagefault(struct xe_gt *gt, struct pagefault *pf)
 {
 	struct xe_device *xe = gt_to_xe(gt);
@@ -197,16 +212,9 @@ static int handle_pagefault(struct xe_gt *gt, struct pagefault *pf)
 	if (pf->trva_fault)
 		return -EFAULT;

-	/* ASID to VM */
-	mutex_lock(&xe->usm.lock);
-	vm = xa_load(&xe->usm.asid_to_vm, pf->asid);
-	if (vm && xe_vm_in_fault_mode(vm))
-		xe_vm_get(vm);
-	else
-		vm = NULL;
-	mutex_unlock(&xe->usm.lock);
-	if (!vm)
-		return -EINVAL;
+	vm = asid_to_vm(xe, pf->asid);
+	if (IS_ERR(vm))
+		return PTR_ERR(vm);

 	/*
 	 * TODO: Change to read lock? Using write lock for simplicity.
@@ -548,14 +556,9 @@ static int handle_acc(struct xe_gt *gt, struct acc *acc)
 	if (acc->access_type != ACC_TRIGGER)
 		return -EINVAL;

-	/* ASID to VM */
-	mutex_lock(&xe->usm.lock);
-	vm = xa_load(&xe->usm.asid_to_vm, acc->asid);
-	if (vm)
-		xe_vm_get(vm);
-	mutex_unlock(&xe->usm.lock);
-	if (!vm || !xe_vm_in_fault_mode(vm))
-		return -EINVAL;
+	vm = asid_to_vm(xe, acc->asid);
+	if (IS_ERR(vm))
+		return PTR_ERR(vm);

 	down_read(&vm->lock);

@@ -51,5 +51,5 @@ int xe_gt_sysfs_init(struct xe_gt *gt)

 	gt->sysfs = &kg->base;

-	return devm_add_action(xe->drm.dev, gt_sysfs_fini, gt);
+	return devm_add_action_or_reset(xe->drm.dev, gt_sysfs_fini, gt);
 }
@@ -276,10 +276,26 @@ static struct workqueue_struct *get_submit_wq(struct xe_guc *guc)
 }
 #endif

+static void xe_guc_submit_fini(struct xe_guc *guc)
+{
+	struct xe_device *xe = guc_to_xe(guc);
+	struct xe_gt *gt = guc_to_gt(guc);
+	int ret;
+
+	ret = wait_event_timeout(guc->submission_state.fini_wq,
+				 xa_empty(&guc->submission_state.exec_queue_lookup),
+				 HZ * 5);
+
+	drain_workqueue(xe->destroy_wq);
+
+	xe_gt_assert(gt, ret);
+}
+
 static void guc_submit_fini(struct drm_device *drm, void *arg)
 {
 	struct xe_guc *guc = arg;

+	xe_guc_submit_fini(guc);
 	xa_destroy(&guc->submission_state.exec_queue_lookup);
 	free_submit_wq(guc);
 }
@@ -290,9 +306,15 @@ static void guc_submit_wedged_fini(void *arg)
 	struct xe_exec_queue *q;
 	unsigned long index;

-	xa_for_each(&guc->submission_state.exec_queue_lookup, index, q)
-		if (exec_queue_wedged(q))
+	mutex_lock(&guc->submission_state.lock);
+	xa_for_each(&guc->submission_state.exec_queue_lookup, index, q) {
+		if (exec_queue_wedged(q)) {
+			mutex_unlock(&guc->submission_state.lock);
 			xe_exec_queue_put(q);
+			mutex_lock(&guc->submission_state.lock);
+		}
+	}
+	mutex_unlock(&guc->submission_state.lock);
 }

 static const struct xe_exec_queue_ops guc_exec_queue_ops;
@@ -345,6 +367,8 @@ int xe_guc_submit_init(struct xe_guc *guc, unsigned int num_ids)

 	xa_init(&guc->submission_state.exec_queue_lookup);

+	init_waitqueue_head(&guc->submission_state.fini_wq);
+
 	primelockdep(guc);

 	return drmm_add_action_or_reset(&xe->drm, guc_submit_fini, guc);
@@ -361,6 +385,9 @@ static void __release_guc_id(struct xe_guc *guc, struct xe_exec_queue *q, u32 xa

 	xe_guc_id_mgr_release_locked(&guc->submission_state.idm,
 				     q->guc->id, q->width);
+
+	if (xa_empty(&guc->submission_state.exec_queue_lookup))
+		wake_up(&guc->submission_state.fini_wq);
 }

 static int alloc_guc_id(struct xe_guc *guc, struct xe_exec_queue *q)
@@ -1268,13 +1295,16 @@ static void __guc_exec_queue_fini_async(struct work_struct *w)

 static void guc_exec_queue_fini_async(struct xe_exec_queue *q)
 {
+	struct xe_guc *guc = exec_queue_to_guc(q);
+	struct xe_device *xe = guc_to_xe(guc);
+
 	INIT_WORK(&q->guc->fini_async, __guc_exec_queue_fini_async);

 	/* We must block on kernel engines so slabs are empty on driver unload */
 	if (q->flags & EXEC_QUEUE_FLAG_PERMANENT || exec_queue_wedged(q))
 		__guc_exec_queue_fini_async(&q->guc->fini_async);
 	else
-		queue_work(system_wq, &q->guc->fini_async);
+		queue_work(xe->destroy_wq, &q->guc->fini_async);
 }

 static void __guc_exec_queue_fini(struct xe_guc *guc, struct xe_exec_queue *q)
@@ -1796,6 +1826,7 @@ static void guc_exec_queue_start(struct xe_exec_queue *q)
 	}

 	xe_sched_submission_start(sched);
+	xe_sched_submission_resume_tdr(sched);
 }

 int xe_guc_submit_start(struct xe_guc *guc)
@@ -81,6 +81,8 @@ struct xe_guc {
 #endif
 		/** @submission_state.enabled: submission is enabled */
 		bool enabled;
+		/** @submission_state.fini_wq: submit fini wait queue */
+		wait_queue_head_t fini_wq;
 	} submission_state;
 	/** @hwconfig: Hardware config state */
 	struct {
@@ -709,8 +709,7 @@ static int xe_oa_configure_oar_context(struct xe_oa_stream *stream, bool enable)
 		{
 			RING_CONTEXT_CONTROL(stream->hwe->mmio_base),
 			regs_offset + CTX_CONTEXT_CONTROL,
-			_MASKED_FIELD(CTX_CTRL_OAC_CONTEXT_ENABLE,
-				      enable ? CTX_CTRL_OAC_CONTEXT_ENABLE : 0)
+			_MASKED_BIT_ENABLE(CTX_CTRL_OAC_CONTEXT_ENABLE),
 		},
 	};
 	struct xe_oa_reg reg_lri = { OAR_OACONTROL, oacontrol };
@@ -742,10 +741,8 @@ static int xe_oa_configure_oac_context(struct xe_oa_stream *stream, bool enable)
 		{
 			RING_CONTEXT_CONTROL(stream->hwe->mmio_base),
 			regs_offset + CTX_CONTEXT_CONTROL,
-			_MASKED_FIELD(CTX_CTRL_OAC_CONTEXT_ENABLE,
-				      enable ? CTX_CTRL_OAC_CONTEXT_ENABLE : 0) |
-			_MASKED_FIELD(CTX_CTRL_RUN_ALONE,
-				      enable ? CTX_CTRL_RUN_ALONE : 0),
+			_MASKED_BIT_ENABLE(CTX_CTRL_OAC_CONTEXT_ENABLE) |
+			_MASKED_FIELD(CTX_CTRL_RUN_ALONE, enable ? CTX_CTRL_RUN_ALONE : 0),
 		},
 	};
 	struct xe_oa_reg reg_lri = { OAC_OACONTROL, oacontrol };
@@ -924,6 +924,8 @@ static int xe_pci_resume(struct device *dev)
 	if (err)
 		return err;

+	pci_restore_state(pdev);
+
 	err = pci_enable_device(pdev);
 	if (err)
 		return err;
@@ -2188,5 +2188,5 @@ void xe_pt_update_ops_abort(struct xe_tile *tile, struct xe_vma_ops *vops)
 					   pt_op->num_entries);
 	}

-	xe_bo_put_commit(&vops->pt_update_ops[tile->id].deferred);
+	xe_pt_update_ops_fini(tile, vops);
 }
@@ -42,20 +42,48 @@ static const struct xe_rtp_entry_sr gt_tunings[] = {
 	  XE_RTP_ACTIONS(CLR(CCCHKNREG1, ENCOMPPERFFIX),
 			 SET(CCCHKNREG1, L3CMPCTRL))
 	},
+	{ XE_RTP_NAME("Tuning: Compression Overfetch - media"),
+	  XE_RTP_RULES(MEDIA_VERSION(2000)),
+	  XE_RTP_ACTIONS(CLR(XE2LPM_CCCHKNREG1, ENCOMPPERFFIX),
+			 SET(XE2LPM_CCCHKNREG1, L3CMPCTRL))
+	},
 	{ XE_RTP_NAME("Tuning: Enable compressible partial write overfetch in L3"),
 	  XE_RTP_RULES(GRAPHICS_VERSION_RANGE(2001, XE_RTP_END_VERSION_UNDEFINED)),
 	  XE_RTP_ACTIONS(SET(L3SQCREG3, COMPPWOVERFETCHEN))
 	},
+	{ XE_RTP_NAME("Tuning: Enable compressible partial write overfetch in L3 - media"),
+	  XE_RTP_RULES(MEDIA_VERSION(2000)),
+	  XE_RTP_ACTIONS(SET(XE2LPM_L3SQCREG3, COMPPWOVERFETCHEN))
+	},
 	{ XE_RTP_NAME("Tuning: L2 Overfetch Compressible Only"),
 	  XE_RTP_RULES(GRAPHICS_VERSION_RANGE(2001, XE_RTP_END_VERSION_UNDEFINED)),
 	  XE_RTP_ACTIONS(SET(L3SQCREG2,
 			     COMPMEMRD256BOVRFETCHEN))
 	},
+	{ XE_RTP_NAME("Tuning: L2 Overfetch Compressible Only - media"),
+	  XE_RTP_RULES(MEDIA_VERSION(2000)),
+	  XE_RTP_ACTIONS(SET(XE2LPM_L3SQCREG2,
+			     COMPMEMRD256BOVRFETCHEN))
+	},
 	{ XE_RTP_NAME("Tuning: Stateless compression control"),
 	  XE_RTP_RULES(GRAPHICS_VERSION_RANGE(2001, XE_RTP_END_VERSION_UNDEFINED)),
 	  XE_RTP_ACTIONS(FIELD_SET(STATELESS_COMPRESSION_CTRL, UNIFIED_COMPRESSION_FORMAT,
 				   REG_FIELD_PREP(UNIFIED_COMPRESSION_FORMAT, 0)))
 	},
+	{ XE_RTP_NAME("Tuning: Stateless compression control - media"),
+	  XE_RTP_RULES(MEDIA_VERSION_RANGE(1301, 2000)),
+	  XE_RTP_ACTIONS(FIELD_SET(STATELESS_COMPRESSION_CTRL, UNIFIED_COMPRESSION_FORMAT,
+				   REG_FIELD_PREP(UNIFIED_COMPRESSION_FORMAT, 0)))
+	},
+	{ XE_RTP_NAME("Tuning: L3 RW flush all Cache"),
+	  XE_RTP_RULES(GRAPHICS_VERSION(2004)),
+	  XE_RTP_ACTIONS(SET(SCRATCH3_LBCF, RWFLUSHALLEN))
+	},
+	{ XE_RTP_NAME("Tuning: L3 RW flush all cache - media"),
+	  XE_RTP_RULES(MEDIA_VERSION(2000)),
+	  XE_RTP_ACTIONS(SET(XE2LPM_SCRATCH3_LBCF, RWFLUSHALLEN))
+	},

 	{}
 };
@@ -1613,7 +1613,7 @@ void xe_vm_close_and_put(struct xe_vm *vm)

 	up_write(&vm->lock);

-	mutex_lock(&xe->usm.lock);
+	down_write(&xe->usm.lock);
 	if (vm->usm.asid) {
 		void *lookup;

@@ -1623,7 +1623,7 @@ void xe_vm_close_and_put(struct xe_vm *vm)
 		lookup = xa_erase(&xe->usm.asid_to_vm, vm->usm.asid);
 		xe_assert(xe, lookup == vm);
 	}
-	mutex_unlock(&xe->usm.lock);
+	up_write(&xe->usm.lock);

 	for_each_tile(tile, xe, id)
 		xe_range_fence_tree_fini(&vm->rftree[id]);
@@ -1765,25 +1765,18 @@ int xe_vm_create_ioctl(struct drm_device *dev, void *data,
 	if (IS_ERR(vm))
 		return PTR_ERR(vm);

-	mutex_lock(&xef->vm.lock);
-	err = xa_alloc(&xef->vm.xa, &id, vm, xa_limit_32b, GFP_KERNEL);
-	mutex_unlock(&xef->vm.lock);
-	if (err)
-		goto err_close_and_put;
-
 	if (xe->info.has_asid) {
-		mutex_lock(&xe->usm.lock);
+		down_write(&xe->usm.lock);
 		err = xa_alloc_cyclic(&xe->usm.asid_to_vm, &asid, vm,
 				      XA_LIMIT(1, XE_MAX_ASID - 1),
 				      &xe->usm.next_asid, GFP_KERNEL);
-		mutex_unlock(&xe->usm.lock);
+		up_write(&xe->usm.lock);
 		if (err < 0)
-			goto err_free_id;
+			goto err_close_and_put;

 		vm->usm.asid = asid;
 	}

-	args->vm_id = id;
 	vm->xef = xe_file_get(xef);

 	/* Record BO memory for VM pagetable created against client */
@@ -1796,12 +1789,15 @@ int xe_vm_create_ioctl(struct drm_device *dev, void *data,
 	args->reserved[0] = xe_bo_main_addr(vm->pt_root[0]->bo, XE_PAGE_SIZE);
 #endif

+	/* user id alloc must always be last in ioctl to prevent UAF */
+	err = xa_alloc(&xef->vm.xa, &id, vm, xa_limit_32b, GFP_KERNEL);
+	if (err)
+		goto err_close_and_put;
+
+	args->vm_id = id;
+
 	return 0;

-err_free_id:
-	mutex_lock(&xef->vm.lock);
-	xa_erase(&xef->vm.xa, id);
-	mutex_unlock(&xef->vm.lock);
 err_close_and_put:
 	xe_vm_close_and_put(vm);

@@ -574,7 +574,7 @@ void drm_sched_entity_modify_sched(struct drm_sched_entity *entity,

 void drm_sched_tdr_queue_imm(struct drm_gpu_scheduler *sched);
 void drm_sched_job_cleanup(struct drm_sched_job *job);
-void drm_sched_wakeup(struct drm_gpu_scheduler *sched, struct drm_sched_entity *entity);
+void drm_sched_wakeup(struct drm_gpu_scheduler *sched);
 bool drm_sched_wqueue_ready(struct drm_gpu_scheduler *sched);
 void drm_sched_wqueue_stop(struct drm_gpu_scheduler *sched);
 void drm_sched_wqueue_start(struct drm_gpu_scheduler *sched);
@@ -59,6 +59,15 @@ enum hdmi_infoframe_type {
 #define HDMI_DRM_INFOFRAME_SIZE 26
 #define HDMI_VENDOR_INFOFRAME_SIZE 4

+/*
+ * HDMI 1.3a table 5-14 states that the largest InfoFrame_length is 27,
+ * not including the packet header or checksum byte. We include the
+ * checksum byte in HDMI_INFOFRAME_HEADER_SIZE, so this should allow
+ * HDMI_INFOFRAME_SIZE(MAX) to be the largest buffer we could ever need
+ * for any HDMI infoframe.
+ */
+#define HDMI_MAX_INFOFRAME_SIZE 27
+
 #define HDMI_INFOFRAME_SIZE(type) \
 	(HDMI_INFOFRAME_HEADER_SIZE + HDMI_ ## type ## _INFOFRAME_SIZE)

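One sizing note on the hdmi.h hunk above: the drm hunks earlier replace hard-coded 29-byte infoframe buffers with HDMI_INFOFRAME_SIZE(MAX), which expands to HDMI_INFOFRAME_HEADER_SIZE plus the new 27-byte maximum payload. Assuming the usual 4-byte infoframe header (a value taken from memory of include/linux/hdmi.h, not from this diff), that bound is 31 bytes, so the change can only grow the buffers. A hedged compile-time sketch of that argument:

/* Sketch only: verifies the MAX-based bound covers the old 29-byte buffers.
 * The exact value (31 with a 4-byte header) is an assumption about
 * HDMI_INFOFRAME_HEADER_SIZE, not something this diff states.
 */
#include <linux/build_bug.h>
#include <linux/hdmi.h>

static_assert(HDMI_INFOFRAME_SIZE(MAX) >= 29,
	      "HDMI_INFOFRAME_SIZE(MAX) must cover the old fixed-size buffers");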