mirror of git://git.yoctoproject.org/linux-yocto.git
Merge tag 'drm-xe-next-2025-01-10' of https://gitlab.freedesktop.org/drm/xe/kernel into drm-next

Driver Changes:
- SRIOV VF: Avoid reading inaccessible registers (Jakub, Marcin)
- Introduce RPa frequency information (Rodrigo)
- Remove unnecessary force wakes on SLPC code (Vinay)
- Fix all typos in xe (Nitin)
- Adding steering info support for GuC register lists (Jesus)
- Remove unused xe_pciids.h harder, add missing PCI ID (Jani)

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Rodrigo Vivi <rodrigo.vivi@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/Z4E0tlTAA6MZ7PF2@intel.com
commit 0dc853865a

@@ -66,7 +66,7 @@ config DRM_XE_DEBUG_MEM
 	bool "Enable passing SYS/VRAM addresses to user space"
 	default n
 	help
-	  Pass object location trough uapi. Intended for extended
+	  Pass object location through uapi. Intended for extended
 	  testing and development only.

 	  Recommended for driver developers only.

@@ -104,5 +104,5 @@ config DRM_XE_USERPTR_INVAL_INJECT
 	  Choose this option when debugging error paths that
 	  are hit during checks for userptr invalidations.

-	  Recomended for driver developers only.
+	  Recommended for driver developers only.
 	  If in doubt, say "N".

@@ -25,7 +25,7 @@ enum guc_state_capture_type {

 #define GUC_STATE_CAPTURE_TYPE_MAX	(GUC_STATE_CAPTURE_TYPE_ENGINE_INSTANCE + 1)

-/* Class indecies for capture_class and capture_instance arrays */
+/* Class indices for capture_class and capture_instance arrays */
 enum guc_capture_list_class_type {
 	GUC_CAPTURE_LIST_CLASS_RENDER_COMPUTE = 0,
 	GUC_CAPTURE_LIST_CLASS_VIDEO = 1,

@@ -132,7 +132,7 @@ enum {
 * _`GUC_KLV_VGT_POLICY_SCHED_IF_IDLE` : 0x8001
 *      This config sets whether strict scheduling is enabled whereby any VF
 *      that doesn't have work to submit is still allocated a fixed execution
-*      time-slice to ensure active VFs execution is always consitent even
+*      time-slice to ensure active VFs execution is always consistent even
 *      during other VF reprovisiong / rebooting events. Changing this KLV
 *      impacts all VFs and takes effect on the next VF-Switch event.
 *

@@ -207,7 +207,7 @@ enum {
 *      of and this will never be perfectly-exact (accumulated nano-second
 *      granularity) since the GPUs clock time runs off a different crystal
 *      from the CPUs clock. Changing this KLV on a VF that is currently
-*      running a context wont take effect until a new context is scheduled in.
+*      running a context won't take effect until a new context is scheduled in.
 *      That said, when the PF is changing this value from 0x0 to
 *      a non-zero value, it might never take effect if the VF is running an
 *      infinitely long compute or shader kernel. In such a scenario, the

@@ -227,7 +227,7 @@ enum {
 *      HW is capable and this will never be perfectly-exact (accumulated
 *      nano-second granularity) since the GPUs clock time runs off a
 *      different crystal from the CPUs clock. Changing this KLV on a VF
-*      that is currently running a context wont take effect until a new
+*      that is currently running a context won't take effect until a new
 *      context is scheduled in.
 *      That said, when the PF is changing this value from 0x0 to
 *      a non-zero value, it might never take effect if the VF is running an

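The KLV entries documented in the two hunks above share one wire format. As an aside, here is a minimal sketch of packing the 0x8001 policy KLV, assuming the generic GuC KLV header layout of key in bits 31:16 and length-in-dwords in bits 15:0; the mask names and the length of 1 dword are local assumptions, not taken from this series:

/*
 * Illustrative sketch only, not part of this series: one KLV entry for
 * the GUC_KLV_VGT_POLICY_SCHED_IF_IDLE key (0x8001) described above,
 * assuming a 32-bit header of key[31:16] and length[15:0] followed by
 * the value dwords. Mask names are local stand-ins.
 */
#include <linux/bitfield.h>
#include <linux/bits.h>

#define KLV_0_KEY	GENMASK(31, 16)	/* assumed header layout */
#define KLV_0_LEN	GENMASK(15, 0)	/* assumed header layout */

static void pack_sched_if_idle_klv(u32 *klv, bool enable)
{
	klv[0] = FIELD_PREP(KLV_0_KEY, 0x8001) | FIELD_PREP(KLV_0_LEN, 1);
	klv[1] = enable ? 1 : 0;	/* strict scheduling on/off */
}
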
@@ -13,7 +13,7 @@
 /**
  * struct xe_reg - Register definition
  *
- * Register defintion to be used by the individual register. Although the same
+ * Register definition to be used by the individual register. Although the same
  * definition is used for xe_reg and xe_reg_mcr, they use different internal
  * APIs for accesses.
  */

@@ -44,12 +44,16 @@

 #define MTL_RP_STATE_CAP		XE_REG(0x138000)

+#define MTL_GT_RPA_FREQUENCY		XE_REG(0x138008)
 #define MTL_GT_RPE_FREQUENCY		XE_REG(0x13800c)

 #define MTL_MEDIAP_STATE_CAP		XE_REG(0x138020)
 #define   MTL_RPN_CAP_MASK		REG_GENMASK(24, 16)
 #define   MTL_RP0_CAP_MASK		REG_GENMASK(8, 0)

+#define MTL_MPA_FREQUENCY		XE_REG(0x138028)
+#define   MTL_RPA_MASK			REG_GENMASK(8, 0)
+
 #define MTL_MPE_FREQUENCY		XE_REG(0x13802c)
 #define   MTL_RPE_MASK			REG_GENMASK(8, 0)

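The new RPa value lives in a 9-bit field behind MTL_RPA_MASK above. For reference, a minimal sketch of pulling such a field out of a raw register read, using the generic bitfield helpers; the driver's REG_FIELD_GET wrapper behaves the same way for this purpose:

/*
 * Minimal sketch, not from this series: extracting the 9-bit RPa field
 * (MTL_RPA_MASK above) from a raw MTL_MPA_FREQUENCY read using the
 * generic GENMASK/FIELD_GET helpers.
 */
#include <linux/bitfield.h>
#include <linux/bits.h>

static u32 mtl_rpa_from_raw(u32 raw)
{
	return FIELD_GET(GENMASK(8, 0), raw);
}
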
@@ -58,7 +58,7 @@ static void read_l3cc_table(struct xe_gt *gt,

 			mocs_dbg(gt, "reg_val=0x%x\n", reg_val);
 		} else {
-			/* Just re-use value read on previous iteration */
+			/* Just reuse value read on previous iteration */
 			reg_val >>= 16;
 		}

@@ -41,7 +41,7 @@ struct xe_bb *xe_bb_new(struct xe_gt *gt, u32 dwords, bool usm)
 	/*
 	 * We need to allocate space for the requested number of dwords,
 	 * one additional MI_BATCH_BUFFER_END dword, and additional buffer
-	 * space to accomodate the platform-specific hardware prefetch
+	 * space to accommodate the platform-specific hardware prefetch
 	 * requirements.
 	 */
 	bb->bo = xe_sa_bo_new(!usm ? tile->mem.kernel_bb_pool : gt->usm.bb_pool,

@@ -786,7 +786,7 @@ static int xe_bo_move(struct ttm_buffer_object *ttm_bo, bool evict,
 		 * / resume, some of the pinned memory is required for the
 		 * device to resume / use the GPU to move other evicted memory
 		 * (user memory) around. This likely could be optimized a bit
-		 * futher where we find the minimum set of pinned memory
+		 * further where we find the minimum set of pinned memory
 		 * required for resume but for simplity doing a memcpy for all
 		 * pinned memory.
 		 */

@@ -875,7 +875,7 @@ out:
 * xe_bo_evict_pinned() - Evict a pinned VRAM object to system memory
 * @bo: The buffer object to move.
 *
- * On successful completion, the object memory will be moved to sytem memory.
+ * On successful completion, the object memory will be moved to system memory.
 *
 * This is needed to for special handling of pinned VRAM object during
 * suspend-resume.

@@ -1370,7 +1370,7 @@ static const struct drm_gem_object_funcs xe_gem_object_funcs = {
 /**
  * xe_bo_alloc - Allocate storage for a struct xe_bo
  *
- * This funcition is intended to allocate storage to be used for input
+ * This function is intended to allocate storage to be used for input
  * to __xe_bo_create_locked(), in the case a pointer to the bo to be
  * created is needed before the call to __xe_bo_create_locked().
  * If __xe_bo_create_locked ends up never to be called, then the

@@ -2412,7 +2412,7 @@ int xe_bo_migrate(struct xe_bo *bo, u32 mem_type)
 * @force_alloc: Set force_alloc in ttm_operation_ctx
 *
 * On successful completion, the object memory will be moved to evict
- * placement. Ths function blocks until the object has been fully moved.
+ * placement. This function blocks until the object has been fully moved.
 *
 * Return: 0 on success. Negative error code on failure.
 */

@@ -41,7 +41,7 @@
 * created the BO can be mmap'd (via DRM_IOCTL_XE_GEM_MMAP_OFFSET) for user
 * access and it can be bound for GPU access (via DRM_IOCTL_XE_VM_BIND). All
 * user BOs are evictable and user BOs are never pinned by XE. The allocation of
- * the backing store can be defered from creation time until first use which is
+ * the backing store can be deferred from creation time until first use which is
 * either mmap, bind, or pagefault.
 *
 * Private BOs

@@ -48,7 +48,7 @@
 *
 * **Coredump release**:
 * After a coredump is generated, it stays in kernel memory until released by
- * userpace by writing anything to it, or after an internal timer expires. The
+ * userspace by writing anything to it, or after an internal timer expires. The
 * exact timeout may vary and should not be relied upon. Example to release
 * a coredump:
 *

@@ -606,7 +606,7 @@ static int probe_has_flat_ccs(struct xe_device *xe)
 	u32 reg;

 	/* Always enabled/disabled, no runtime check to do */
-	if (GRAPHICS_VER(xe) < 20 || !xe->info.has_flat_ccs)
+	if (GRAPHICS_VER(xe) < 20 || !xe->info.has_flat_ccs || IS_SRIOV_VF(xe))
 		return 0;

 	gt = xe_root_mmio_gt(xe);

@@ -999,7 +999,7 @@ static void xe_device_wedged_fini(struct drm_device *drm, void *arg)
 * xe_device_declare_wedged - Declare device wedged
 * @xe: xe device instance
 *
- * This is a final state that can only be cleared with a mudule
+ * This is a final state that can only be cleared with a module
 * re-probe (unbind + bind).
 * In this state every IOCTL will be blocked so the GT cannot be used.
 * In general it will be called upon any critical error such as gt reset

@@ -385,7 +385,7 @@ static void show_run_ticks(struct drm_printer *p, struct drm_file *file)
 * @p: The drm_printer ptr
 * @file: The drm_file ptr
 *
- * This is callabck for drm fdinfo interface. Register this callback
+ * This is callback for drm fdinfo interface. Register this callback
 * in drm driver ops for show_fdinfo.
 *
 * Return: void

@@ -33,7 +33,7 @@
 *
 * In XE we avoid all of this complication by not allowing a BO list to be
 * passed into an exec, using the dma-buf implicit sync uAPI, have binds as
- * seperate operations, and using the DRM scheduler to flow control the ring.
+ * separate operations, and using the DRM scheduler to flow control the ring.
 * Let's deep dive on each of these.
 *
 * We can get away from a BO list by forcing the user to use in / out fences on

@@ -362,7 +362,7 @@ int xe_ggtt_init(struct xe_ggtt *ggtt)

 	/*
 	 * So we don't need to worry about 64K GGTT layout when dealing with
-	 * scratch entires, rather keep the scratch page in system memory on
+	 * scratch entries, rather keep the scratch page in system memory on
 	 * platforms where 64K pages are needed for VRAM.
 	 */
 	flags = XE_BO_FLAG_PINNED;

@@ -37,7 +37,7 @@ int xe_gt_record_default_lrcs(struct xe_gt *gt);

 /**
  * xe_gt_record_user_engines - save data related to engines available to
- * usersapce
+ * userspace
  * @gt: GT structure
  *
  * Walk the available HW engines from gt->info.engine_mask and calculate data

@@ -115,6 +115,20 @@ static ssize_t rpe_freq_show(struct device *dev,
 }
 static DEVICE_ATTR_RO(rpe_freq);

+static ssize_t rpa_freq_show(struct device *dev,
+			     struct device_attribute *attr, char *buf)
+{
+	struct xe_guc_pc *pc = dev_to_pc(dev);
+	u32 freq;
+
+	xe_pm_runtime_get(dev_to_xe(dev));
+	freq = xe_guc_pc_get_rpa_freq(pc);
+	xe_pm_runtime_put(dev_to_xe(dev));
+
+	return sysfs_emit(buf, "%d\n", freq);
+}
+static DEVICE_ATTR_RO(rpa_freq);
+
 static ssize_t rpn_freq_show(struct device *dev,
 			     struct device_attribute *attr, char *buf)
 {

@@ -202,6 +216,7 @@ static const struct attribute *freq_attrs[] = {
 	&dev_attr_act_freq.attr,
 	&dev_attr_cur_freq.attr,
 	&dev_attr_rp0_freq.attr,
+	&dev_attr_rpa_freq.attr,
 	&dev_attr_rpe_freq.attr,
 	&dev_attr_rpn_freq.attr,
 	&dev_attr_min_freq.attr,

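With dev_attr_rpa_freq registered in freq_attrs above, the value becomes a read-only sysfs file next to rp0_freq/rpe_freq/rpn_freq. A userspace sketch for reading it follows; the exact path is an assumption and depends on the card, tile and GT indices on a given system:

/* Userspace sketch; the sysfs path is an assumption (varies per card/tile/GT). */
#include <stdio.h>

int main(void)
{
	const char *path = "/sys/class/drm/card0/device/tile0/gt0/freq0/rpa_freq";
	unsigned int mhz;
	FILE *f = fopen(path, "r");

	if (!f) {
		perror(path);
		return 1;
	}
	if (fscanf(f, "%u", &mhz) == 1)
		printf("RPa frequency: %u MHz\n", mhz);
	fclose(f);
	return 0;
}
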
@@ -371,7 +371,7 @@ void xe_gt_mcr_get_dss_steering(struct xe_gt *gt, unsigned int dss, u16 *group,
 * @group: steering group ID
 * @instance: steering instance ID
 *
- * Return: the coverted DSS id.
+ * Return: the converted DSS id.
 */
 u32 xe_gt_mcr_steering_info_to_dss_id(struct xe_gt *gt, u16 group, u16 instance)
 {

@@ -550,9 +550,9 @@ void xe_gt_mcr_set_implicit_defaults(struct xe_gt *gt)
 * Returns true if the caller should steer to the @group/@instance values
 * returned. Returns false if the caller need not perform any steering
 */
-static bool xe_gt_mcr_get_nonterminated_steering(struct xe_gt *gt,
-						 struct xe_reg_mcr reg_mcr,
-						 u8 *group, u8 *instance)
+bool xe_gt_mcr_get_nonterminated_steering(struct xe_gt *gt,
+					  struct xe_reg_mcr reg_mcr,
+					  u8 *group, u8 *instance)
 {
 	const struct xe_reg reg = to_xe_reg(reg_mcr);
 	const struct xe_mmio_range *implicit_ranges;

@@ -26,6 +26,10 @@ void xe_gt_mcr_unicast_write(struct xe_gt *gt, struct xe_reg_mcr mcr_reg,
 void xe_gt_mcr_multicast_write(struct xe_gt *gt, struct xe_reg_mcr mcr_reg,
 			       u32 value);

+bool xe_gt_mcr_get_nonterminated_steering(struct xe_gt *gt,
+					  struct xe_reg_mcr reg_mcr,
+					  u8 *group, u8 *instance);
+
 void xe_gt_mcr_steering_dump(struct xe_gt *gt, struct drm_printer *p);
 void xe_gt_mcr_get_dss_steering(struct xe_gt *gt, unsigned int dss, u16 *group, u16 *instance);
 u32 xe_gt_mcr_steering_info_to_dss_id(struct xe_gt *gt, u16 group, u16 instance);

@@ -2161,7 +2161,7 @@ bool xe_gt_sriov_pf_config_is_empty(struct xe_gt *gt, unsigned int vfid)
 *
 * This function can only be called on PF.
 *
- * Return: mininum size of the buffer or the number of bytes saved,
+ * Return: minimum size of the buffer or the number of bytes saved,
 * or a negative error code on failure.
 */
 ssize_t xe_gt_sriov_pf_config_save(struct xe_gt *gt, unsigned int vfid, void *buf, size_t size)

@@ -29,6 +29,7 @@
 #include "xe_platform_types.h"
 #include "xe_uc_fw.h"
 #include "xe_wa.h"
+#include "xe_gt_mcr.h"

 /* Slack of a few additional entries per engine */
 #define ADS_REGSET_EXTRA_MAX	8

@@ -696,6 +697,20 @@ static void guc_mmio_regset_write_one(struct xe_guc_ads *ads,
 		.flags = reg.masked ? GUC_REGSET_MASKED : 0,
 	};

+	if (reg.mcr) {
+		struct xe_reg_mcr mcr_reg = XE_REG_MCR(reg.addr);
+		u8 group, instance;
+
+		bool steer = xe_gt_mcr_get_nonterminated_steering(ads_to_gt(ads), mcr_reg,
+								  &group, &instance);
+
+		if (steer) {
+			entry.flags |= FIELD_PREP(GUC_REGSET_STEERING_GROUP, group);
+			entry.flags |= FIELD_PREP(GUC_REGSET_STEERING_INSTANCE, instance);
+			entry.flags |= GUC_REGSET_STEERING_NEEDED;
+		}
+	}
+
 	xe_map_memcpy_to(ads_to_xe(ads), regset_map, n_entry * sizeof(entry),
 			 &entry, sizeof(entry));
 }

@@ -1955,7 +1955,7 @@ xe_engine_snapshot_capture_for_queue(struct xe_exec_queue *q)
 }

 /*
- * xe_guc_capture_put_matched_nodes - Cleanup macthed nodes
+ * xe_guc_capture_put_matched_nodes - Cleanup matched nodes
 * @guc: The GuC object
 *
 * Free matched node and all nodes with the equal guc_id from

@@ -22,7 +22,7 @@ enum capture_register_data_type {
 * struct __guc_mmio_reg_descr - GuC mmio register descriptor
 *
 * xe_guc_capture module uses these structures to define a register
- * (offsets, names, flags,...) that are used at the ADS regisration
+ * (offsets, names, flags,...) that are used at the ADS registration
 * time as well as during runtime processing and reporting of error-
 * capture states generated by GuC just prior to engine reset events.
 */

@@ -48,7 +48,7 @@ struct __guc_mmio_reg_descr {
 *
 * xe_guc_capture module uses these structures to maintain static
 * tables (per unique platform) that consists of lists of registers
- * (offsets, names, flags,...) that are used at the ADS regisration
+ * (offsets, names, flags,...) that are used at the ADS registration
 * time as well as during runtime processing and reporting of error-
 * capture states generated by GuC just prior to engine reset events.
 */

@@ -710,7 +710,7 @@ static int h2g_write(struct xe_guc_ct *ct, const u32 *action, u32 len,
 	--len;
 	++action;

-	/* Write H2G ensuring visable before descriptor update */
+	/* Write H2G ensuring visible before descriptor update */
 	xe_map_memcpy_to(xe, &map, 0, cmd, H2G_CT_HEADERS * sizeof(u32));
 	xe_map_memcpy_to(xe, &map, H2G_CT_HEADERS * sizeof(u32), action, len * sizeof(u32));
 	xe_device_wmb(xe);

@@ -1383,7 +1383,7 @@ static int g2h_read(struct xe_guc_ct *ct, u32 *msg, bool fast_path)
 	 * this function and nowhere else. Hence, they cannot be different
 	 * unless two g2h_read calls are running concurrently. Which is not
 	 * possible because it is guarded by ct->fast_lock. And yet, some
-	 * discrete platforms are reguarly hitting this error :(.
+	 * discrete platforms are regularly hitting this error :(.
 	 *
 	 * desc_head rolling backwards shouldn't cause any noticeable
 	 * problems - just a delay in GuC being allowed to proceed past that

@@ -38,6 +38,7 @@

 #define FREQ_INFO_REC	XE_REG(MCHBAR_MIRROR_BASE_SNB + 0x5ef0)
 #define   RPE_MASK	REG_GENMASK(15, 8)
+#define   RPA_MASK	REG_GENMASK(31, 16)

 #define GT_PERF_STATUS		XE_REG(0x1381b4)
 #define   CAGF_MASK	REG_GENMASK(19, 11)

@@ -328,6 +329,19 @@ static int pc_set_max_freq(struct xe_guc_pc *pc, u32 freq)
 			   freq);
 }

+static void mtl_update_rpa_value(struct xe_guc_pc *pc)
+{
+	struct xe_gt *gt = pc_to_gt(pc);
+	u32 reg;
+
+	if (xe_gt_is_media_type(gt))
+		reg = xe_mmio_read32(&gt->mmio, MTL_MPA_FREQUENCY);
+	else
+		reg = xe_mmio_read32(&gt->mmio, MTL_GT_RPA_FREQUENCY);
+
+	pc->rpa_freq = decode_freq(REG_FIELD_GET(MTL_RPA_MASK, reg));
+}
+
 static void mtl_update_rpe_value(struct xe_guc_pc *pc)
 {
 	struct xe_gt *gt = pc_to_gt(pc);

@@ -341,6 +355,25 @@ static void mtl_update_rpe_value(struct xe_guc_pc *pc)
 	pc->rpe_freq = decode_freq(REG_FIELD_GET(MTL_RPE_MASK, reg));
 }

+static void tgl_update_rpa_value(struct xe_guc_pc *pc)
+{
+	struct xe_gt *gt = pc_to_gt(pc);
+	struct xe_device *xe = gt_to_xe(gt);
+	u32 reg;
+
+	/*
+	 * For PVC we still need to use fused RP1 as the approximation for RPe
+	 * For other platforms than PVC we get the resolved RPe directly from
+	 * PCODE at a different register
+	 */
+	if (xe->info.platform == XE_PVC)
+		reg = xe_mmio_read32(&gt->mmio, PVC_RP_STATE_CAP);
+	else
+		reg = xe_mmio_read32(&gt->mmio, FREQ_INFO_REC);
+
+	pc->rpa_freq = REG_FIELD_GET(RPA_MASK, reg) * GT_FREQUENCY_MULTIPLIER;
+}
+
 static void tgl_update_rpe_value(struct xe_guc_pc *pc)
 {
 	struct xe_gt *gt = pc_to_gt(pc);

@@ -365,10 +398,13 @@ static void pc_update_rp_values(struct xe_guc_pc *pc)
 	struct xe_gt *gt = pc_to_gt(pc);
 	struct xe_device *xe = gt_to_xe(gt);

-	if (GRAPHICS_VERx100(xe) >= 1270)
+	if (GRAPHICS_VERx100(xe) >= 1270) {
+		mtl_update_rpa_value(pc);
 		mtl_update_rpe_value(pc);
-	else
+	} else {
+		tgl_update_rpa_value(pc);
 		tgl_update_rpe_value(pc);
+	}

 	/*
 	 * RPe is decided at runtime by PCODE. In the rare case where that's

@@ -421,8 +457,8 @@ int xe_guc_pc_get_cur_freq(struct xe_guc_pc *pc, u32 *freq)
 	 * GuC SLPC plays with cur freq request when GuCRC is enabled
 	 * Block RC6 for a more reliable read.
 	 */
-	fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
-	if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL)) {
+	fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
+	if (!xe_force_wake_ref_has_domain(fw_ref, XE_FW_GT)) {
 		xe_force_wake_put(gt_to_fw(gt), fw_ref);
 		return -ETIMEDOUT;
 	}

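The hunk above narrows the force-wake domain from XE_FORCEWAKE_ALL to just XE_FW_GT, presumably because only GT-domain registers are read here. Condensed from the diff itself, the reference-counted acquire/check/release pattern looks like this (assumed to run inside a function with struct xe_gt *gt in scope):

/* Condensed from the hunk above; assumes struct xe_gt *gt is in scope. */
unsigned int fw_ref;

fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
if (!xe_force_wake_ref_has_domain(fw_ref, XE_FW_GT)) {
	xe_force_wake_put(gt_to_fw(gt), fw_ref);
	return -ETIMEDOUT;
}

/* ... read the SLPC-managed registers ... */

xe_force_wake_put(gt_to_fw(gt), fw_ref);
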
@@ -447,6 +483,19 @@ u32 xe_guc_pc_get_rp0_freq(struct xe_guc_pc *pc)
 	return pc->rp0_freq;
 }

+/**
+ * xe_guc_pc_get_rpa_freq - Get the RPa freq
+ * @pc: The GuC PC
+ *
+ * Returns: RPa freq.
+ */
+u32 xe_guc_pc_get_rpa_freq(struct xe_guc_pc *pc)
+{
+	pc_update_rp_values(pc);
+
+	return pc->rpa_freq;
+}
+
 /**
  * xe_guc_pc_get_rpe_freq - Get the RPe freq
  * @pc: The GuC PC

@@ -481,10 +530,10 @@ u32 xe_guc_pc_get_rpn_freq(struct xe_guc_pc *pc)
 */
 int xe_guc_pc_get_min_freq(struct xe_guc_pc *pc, u32 *freq)
 {
-	struct xe_gt *gt = pc_to_gt(pc);
-	unsigned int fw_ref;
 	int ret;

 	xe_device_assert_mem_access(pc_to_xe(pc));

 	mutex_lock(&pc->freq_lock);
 	if (!pc->freq_ready) {
 		/* Might be in the middle of a gt reset */

@@ -492,24 +541,12 @@ int xe_guc_pc_get_min_freq(struct xe_guc_pc *pc, u32 *freq)
 		goto out;
 	}

-	/*
-	 * GuC SLPC plays with min freq request when GuCRC is enabled
-	 * Block RC6 for a more reliable read.
-	 */
-	fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
-	if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL)) {
-		ret = -ETIMEDOUT;
-		goto fw;
-	}
-
 	ret = pc_action_query_task_state(pc);
 	if (ret)
-		goto fw;
+		goto out;

 	*freq = pc_get_min_freq(pc);

-fw:
-	xe_force_wake_put(gt_to_fw(gt), fw_ref);
 out:
 	mutex_unlock(&pc->freq_lock);
 	return ret;

@@ -969,8 +1006,8 @@ int xe_guc_pc_start(struct xe_guc_pc *pc)

 	xe_gt_assert(gt, xe_device_uc_enabled(xe));

-	fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
-	if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL)) {
+	fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
+	if (!xe_force_wake_ref_has_domain(fw_ref, XE_FW_GT)) {
 		xe_force_wake_put(gt_to_fw(gt), fw_ref);
 		return -ETIMEDOUT;
 	}

@@ -21,6 +21,7 @@ int xe_guc_pc_unset_gucrc_mode(struct xe_guc_pc *pc);
 u32 xe_guc_pc_get_act_freq(struct xe_guc_pc *pc);
 int xe_guc_pc_get_cur_freq(struct xe_guc_pc *pc, u32 *freq);
 u32 xe_guc_pc_get_rp0_freq(struct xe_guc_pc *pc);
+u32 xe_guc_pc_get_rpa_freq(struct xe_guc_pc *pc);
 u32 xe_guc_pc_get_rpe_freq(struct xe_guc_pc *pc);
 u32 xe_guc_pc_get_rpn_freq(struct xe_guc_pc *pc);
 int xe_guc_pc_get_min_freq(struct xe_guc_pc *pc, u32 *freq);

@@ -17,6 +17,8 @@ struct xe_guc_pc {
 	struct xe_bo *bo;
 	/** @rp0_freq: HW RP0 frequency - The Maximum one */
 	u32 rp0_freq;
+	/** @rpa_freq: HW RPa frequency - The Achievable one */
+	u32 rpa_freq;
 	/** @rpe_freq: HW RPe frequency - The Efficient one */
 	u32 rpe_freq;
 	/** @rpn_freq: HW RPN frequency - The Minimum one */

@@ -1226,7 +1226,7 @@ sched_enable:
 	enable_scheduling(q);
 rearm:
 	/*
-	 * XXX: Ideally want to adjust timeout based on current exection time
+	 * XXX: Ideally want to adjust timeout based on current execution time
 	 * but there is not currently an easy way to do in DRM scheduler. With
 	 * some thought, do this in a follow up.
 	 */

@@ -159,7 +159,7 @@ void xe_hmm_userptr_free_sg(struct xe_userptr_vma *uvma)
 * This function allocates the storage of the userptr sg table.
 * It is caller's responsibility to free it calling sg_free_table.
 *
- * returns: 0 for succuss; negative error no on failure
+ * returns: 0 for success; negative error no on failure
 */
 int xe_hmm_userptr_populate_range(struct xe_userptr_vma *uvma,
 				  bool is_mm_mmap_locked)

@@ -1506,7 +1506,7 @@ err_bb:
 * using the default engine for the updates, they will be performed in the
 * order they grab the job_mutex. If different engines are used, external
 * synchronization is needed for overlapping updates to maintain page-table
- * consistency. Note that the meaing of "overlapping" is that the updates
+ * consistency. Note that the meaning of "overlapping" is that the updates
 * touch the same page-table, which might be a higher-level page-directory.
 * If no pipelining is needed, then updates may be performed by the cpu.
 *

@@ -490,7 +490,7 @@ static void read_gmdid(struct xe_device *xe, enum xe_gmdid_type type, u32 *ver,
 	 * least basic xe_gt and xe_guc initialization.
 	 *
 	 * Since to obtain the value of GMDID_MEDIA we need to use the
-	 * media GuC, temporarly tweak the gt type.
+	 * media GuC, temporarily tweak the gt type.
 	 */
 	xe_gt_assert(gt, gt->info.type == XE_GT_TYPE_UNINITIALIZED);

@@ -781,7 +781,7 @@ static void xe_pci_remove(struct pci_dev *pdev)
 * error injectable functions is proper handling of the error code by the
 * caller for recovery, which is always the case here. The second
 * requirement is that no state is changed before the first error return.
- * It is not strictly fullfilled for all initialization functions using the
+ * It is not strictly fulfilled for all initialization functions using the
 * ALLOW_ERROR_INJECTION() macro but this is acceptable because for those
 * error cases at probe time, the error code is simply propagated up by the
 * caller. Therefore there is no consequence on those specific callers when

@@ -217,7 +217,7 @@ out:
 *
 * It returns 0 on success, and -ERROR number on failure, -EINVAL if max
 * frequency is higher then the minimal, and other errors directly translated
- * from the PCODE Error returs:
+ * from the PCODE Error returns:
 * - -ENXIO: "Illegal Command"
 * - -ETIMEDOUT: "Timed out"
 * - -EINVAL: "Illegal Data"

@@ -391,7 +391,7 @@ int xe_pm_runtime_suspend(struct xe_device *xe)

 	/*
 	 * Applying lock for entire list op as xe_ttm_bo_destroy and xe_bo_move_notify
-	 * also checks and delets bo entry from user fault list.
+	 * also checks and deletes bo entry from user fault list.
 	 */
 	mutex_lock(&xe->mem_access.vram_userfault.lock);
 	list_for_each_entry_safe(bo, on,

@@ -276,7 +276,7 @@ struct xe_pt_stage_bind_walk {
 	/* Also input, but is updated during the walk*/
 	/** @curs: The DMA address cursor. */
 	struct xe_res_cursor *curs;
-	/** @va_curs_start: The Virtual address coresponding to @curs->start */
+	/** @va_curs_start: The Virtual address corresponding to @curs->start */
 	u64 va_curs_start;

 	/* Output */

@@ -340,3 +340,8 @@ bool xe_rtp_match_first_gslice_fused_off(const struct xe_gt *gt,
 	return dss >= dss_per_gslice;
 }

+bool xe_rtp_match_not_sriov_vf(const struct xe_gt *gt,
+			       const struct xe_hw_engine *hwe)
+{
+	return !IS_SRIOV_VF(gt_to_xe(gt));
+}

@@ -131,7 +131,7 @@ struct xe_reg_sr;
 * @ver_end__: Last graphics IP version to match
 *
 * Note that the range matching this rule is [ @ver_start__, @ver_end__ ], i.e.
- * inclusive on boths sides
+ * inclusive on both sides
 *
 * Refer to XE_RTP_RULES() for expected usage.
 */

@@ -169,7 +169,7 @@ struct xe_reg_sr;
 * @ver_end__: Last media IP version to match
 *
 * Note that the range matching this rule is [ @ver_start__, @ver_end__ ], i.e.
- * inclusive on boths sides
+ * inclusive on both sides
 *
 * Refer to XE_RTP_RULES() for expected usage.
 */

@@ -476,4 +476,15 @@ bool xe_rtp_match_first_render_or_compute(const struct xe_gt *gt,
 bool xe_rtp_match_first_gslice_fused_off(const struct xe_gt *gt,
 					 const struct xe_hw_engine *hwe);

+/*
+ * xe_rtp_match_not_sriov_vf - Match when not on SR-IOV VF device
+ *
+ * @gt: GT structure
+ * @hwe: Engine instance
+ *
+ * Returns: true if device is not VF, false otherwise.
+ */
+bool xe_rtp_match_not_sriov_vf(const struct xe_gt *gt,
+			       const struct xe_hw_engine *hwe);
+
 #endif

@@ -92,7 +92,7 @@ struct xe_uc_fw {
 	const enum xe_uc_fw_status status;
 	/**
 	 * @__status: private firmware load status - only to be used
-	 * by firmware laoding code
+	 * by firmware loading code
 	 */
 	enum xe_uc_fw_status __status;
 };

@@ -1024,7 +1024,7 @@ static void xe_vma_destroy_late(struct xe_vma *vma)

 		/*
 		 * Since userptr pages are not pinned, we can't remove
-		 * the notifer until we're sure the GPU is not accessing
+		 * the notifier until we're sure the GPU is not accessing
 		 * them anymore
 		 */
 		mmu_interval_notifier_remove(&userptr->notifier);

@@ -2107,7 +2107,7 @@ static int xe_vma_op_commit(struct xe_vm *vm, struct xe_vma_op *op)
 		}
 	}

-	/* Adjust for partial unbind after removin VMA from VM */
+	/* Adjust for partial unbind after removing VMA from VM */
 	if (!err) {
 		op->base.remap.unmap->va->va.addr = op->remap.start;
 		op->base.remap.unmap->va->va.range = op->remap.range;

@@ -34,7 +34,7 @@
 		GRAPHICS_VERSION(2004)
 22019338487	MEDIA_VERSION(2000)
 		GRAPHICS_VERSION(2001)
-		MEDIA_VERSION(3000), MEDIA_STEP(A0, B0)
+		MEDIA_VERSION(3000), MEDIA_STEP(A0, B0), FUNC(xe_rtp_match_not_sriov_vf)
 22019338487_display	PLATFORM(LUNARLAKE)
 16023588340	GRAPHICS_VERSION(2001)
 14019789679	GRAPHICS_VERSION(1255)

@@ -858,6 +858,7 @@
 	MACRO__(0xB092, ## __VA_ARGS__), \
 	MACRO__(0xB0A0, ## __VA_ARGS__), \
 	MACRO__(0xB0A1, ## __VA_ARGS__), \
-	MACRO__(0xB0A2, ## __VA_ARGS__)
+	MACRO__(0xB0A2, ## __VA_ARGS__), \
+	MACRO__(0xB0B0, ## __VA_ARGS__)

 #endif /* __PCIIDS_H__ */

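Both the ID list extended above and the removed xe_pciids.h below follow the same pattern: each list expands through a caller-supplied macro. A hypothetical consumer, adapted from the usage comment preserved in the removed header below (the macro and table names here are illustrative, not from this series):

/*
 * Hypothetical consumer, adapted from the usage comment in the removed
 * header below; not part of this series.
 */
#include <linux/pci.h>

#define INTEL_VGA_DEVICE(id, info) {	\
	0x8086, id,			\
	~0, ~0,				\
	0x030000, 0xff0000,		\
	(unsigned long) info }

static const struct pci_device_id ptl_ids[] = {
	XE_PTL_IDS(INTEL_VGA_DEVICE, NULL),
	{ }
};
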
@@ -1,235 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2022 Intel Corporation
- */
-
-#ifndef _XE_PCIIDS_H_
-#define _XE_PCIIDS_H_
-
-/*
- * Lists below can be turned into initializers for a struct pci_device_id
- * by defining INTEL_VGA_DEVICE:
- *
- * #define INTEL_VGA_DEVICE(id, info) { \
- *	0x8086, id, \
- *	~0, ~0, \
- *	0x030000, 0xff0000, \
- *	(unsigned long) info }
- *
- * And then calling like:
- *
- * XE_TGL_12_GT1_IDS(INTEL_VGA_DEVICE, ## __VA_ARGS__)
- *
- * To turn them into something else, just provide a different macro passed as
- * first argument.
- */
-
-/* TGL */
-#define XE_TGL_GT1_IDS(MACRO__, ...)		\
-	MACRO__(0x9A60, ## __VA_ARGS__),	\
-	MACRO__(0x9A68, ## __VA_ARGS__),	\
-	MACRO__(0x9A70, ## __VA_ARGS__)
-
-#define XE_TGL_GT2_IDS(MACRO__, ...)		\
-	MACRO__(0x9A40, ## __VA_ARGS__),	\
-	MACRO__(0x9A49, ## __VA_ARGS__),	\
-	MACRO__(0x9A59, ## __VA_ARGS__),	\
-	MACRO__(0x9A78, ## __VA_ARGS__),	\
-	MACRO__(0x9AC0, ## __VA_ARGS__),	\
-	MACRO__(0x9AC9, ## __VA_ARGS__),	\
-	MACRO__(0x9AD9, ## __VA_ARGS__),	\
-	MACRO__(0x9AF8, ## __VA_ARGS__)
-
-#define XE_TGL_IDS(MACRO__, ...)		\
-	XE_TGL_GT1_IDS(MACRO__, ## __VA_ARGS__),\
-	XE_TGL_GT2_IDS(MACRO__, ## __VA_ARGS__)
-
-/* RKL */
-#define XE_RKL_IDS(MACRO__, ...)		\
-	MACRO__(0x4C80, ## __VA_ARGS__),	\
-	MACRO__(0x4C8A, ## __VA_ARGS__),	\
-	MACRO__(0x4C8B, ## __VA_ARGS__),	\
-	MACRO__(0x4C8C, ## __VA_ARGS__),	\
-	MACRO__(0x4C90, ## __VA_ARGS__),	\
-	MACRO__(0x4C9A, ## __VA_ARGS__)
-
-/* DG1 */
-#define XE_DG1_IDS(MACRO__, ...)		\
-	MACRO__(0x4905, ## __VA_ARGS__),	\
-	MACRO__(0x4906, ## __VA_ARGS__),	\
-	MACRO__(0x4907, ## __VA_ARGS__),	\
-	MACRO__(0x4908, ## __VA_ARGS__),	\
-	MACRO__(0x4909, ## __VA_ARGS__)
-
-/* ADL-S */
-#define XE_ADLS_IDS(MACRO__, ...)		\
-	MACRO__(0x4680, ## __VA_ARGS__),	\
-	MACRO__(0x4682, ## __VA_ARGS__),	\
-	MACRO__(0x4688, ## __VA_ARGS__),	\
-	MACRO__(0x468A, ## __VA_ARGS__),	\
-	MACRO__(0x468B, ## __VA_ARGS__),	\
-	MACRO__(0x4690, ## __VA_ARGS__),	\
-	MACRO__(0x4692, ## __VA_ARGS__),	\
-	MACRO__(0x4693, ## __VA_ARGS__)
-
-/* ADL-P */
-#define XE_ADLP_IDS(MACRO__, ...)		\
-	MACRO__(0x46A0, ## __VA_ARGS__),	\
-	MACRO__(0x46A1, ## __VA_ARGS__),	\
-	MACRO__(0x46A2, ## __VA_ARGS__),	\
-	MACRO__(0x46A3, ## __VA_ARGS__),	\
-	MACRO__(0x46A6, ## __VA_ARGS__),	\
-	MACRO__(0x46A8, ## __VA_ARGS__),	\
-	MACRO__(0x46AA, ## __VA_ARGS__),	\
-	MACRO__(0x462A, ## __VA_ARGS__),	\
-	MACRO__(0x4626, ## __VA_ARGS__),	\
-	MACRO__(0x4628, ## __VA_ARGS__),	\
-	MACRO__(0x46B0, ## __VA_ARGS__),	\
-	MACRO__(0x46B1, ## __VA_ARGS__),	\
-	MACRO__(0x46B2, ## __VA_ARGS__),	\
-	MACRO__(0x46B3, ## __VA_ARGS__),	\
-	MACRO__(0x46C0, ## __VA_ARGS__),	\
-	MACRO__(0x46C1, ## __VA_ARGS__),	\
-	MACRO__(0x46C2, ## __VA_ARGS__),	\
-	MACRO__(0x46C3, ## __VA_ARGS__)
-
-/* ADL-N */
-#define XE_ADLN_IDS(MACRO__, ...)		\
-	MACRO__(0x46D0, ## __VA_ARGS__),	\
-	MACRO__(0x46D1, ## __VA_ARGS__),	\
-	MACRO__(0x46D2, ## __VA_ARGS__),	\
-	MACRO__(0x46D3, ## __VA_ARGS__),	\
-	MACRO__(0x46D4, ## __VA_ARGS__)
-
-/* RPL-S */
-#define XE_RPLS_IDS(MACRO__, ...)		\
-	MACRO__(0xA780, ## __VA_ARGS__),	\
-	MACRO__(0xA781, ## __VA_ARGS__),	\
-	MACRO__(0xA782, ## __VA_ARGS__),	\
-	MACRO__(0xA783, ## __VA_ARGS__),	\
-	MACRO__(0xA788, ## __VA_ARGS__),	\
-	MACRO__(0xA789, ## __VA_ARGS__),	\
-	MACRO__(0xA78A, ## __VA_ARGS__),	\
-	MACRO__(0xA78B, ## __VA_ARGS__)
-
-/* RPL-U */
-#define XE_RPLU_IDS(MACRO__, ...)		\
-	MACRO__(0xA721, ## __VA_ARGS__),	\
-	MACRO__(0xA7A1, ## __VA_ARGS__),	\
-	MACRO__(0xA7A9, ## __VA_ARGS__),	\
-	MACRO__(0xA7AC, ## __VA_ARGS__),	\
-	MACRO__(0xA7AD, ## __VA_ARGS__)
-
-/* RPL-P */
-#define XE_RPLP_IDS(MACRO__, ...)		\
-	MACRO__(0xA720, ## __VA_ARGS__),	\
-	MACRO__(0xA7A0, ## __VA_ARGS__),	\
-	MACRO__(0xA7A8, ## __VA_ARGS__),	\
-	MACRO__(0xA7AA, ## __VA_ARGS__),	\
-	MACRO__(0xA7AB, ## __VA_ARGS__)
-
-/* DG2 */
-#define XE_DG2_G10_IDS(MACRO__, ...)		\
-	MACRO__(0x5690, ## __VA_ARGS__),	\
-	MACRO__(0x5691, ## __VA_ARGS__),	\
-	MACRO__(0x5692, ## __VA_ARGS__),	\
-	MACRO__(0x56A0, ## __VA_ARGS__),	\
-	MACRO__(0x56A1, ## __VA_ARGS__),	\
-	MACRO__(0x56A2, ## __VA_ARGS__),	\
-	MACRO__(0x56BE, ## __VA_ARGS__),	\
-	MACRO__(0x56BF, ## __VA_ARGS__)
-
-#define XE_DG2_G11_IDS(MACRO__, ...)		\
-	MACRO__(0x5693, ## __VA_ARGS__),	\
-	MACRO__(0x5694, ## __VA_ARGS__),	\
-	MACRO__(0x5695, ## __VA_ARGS__),	\
-	MACRO__(0x56A5, ## __VA_ARGS__),	\
-	MACRO__(0x56A6, ## __VA_ARGS__),	\
-	MACRO__(0x56B0, ## __VA_ARGS__),	\
-	MACRO__(0x56B1, ## __VA_ARGS__),	\
-	MACRO__(0x56BA, ## __VA_ARGS__),	\
-	MACRO__(0x56BB, ## __VA_ARGS__),	\
-	MACRO__(0x56BC, ## __VA_ARGS__),	\
-	MACRO__(0x56BD, ## __VA_ARGS__)
-
-#define XE_DG2_G12_IDS(MACRO__, ...)		\
-	MACRO__(0x5696, ## __VA_ARGS__),	\
-	MACRO__(0x5697, ## __VA_ARGS__),	\
-	MACRO__(0x56A3, ## __VA_ARGS__),	\
-	MACRO__(0x56A4, ## __VA_ARGS__),	\
-	MACRO__(0x56B2, ## __VA_ARGS__),	\
-	MACRO__(0x56B3, ## __VA_ARGS__)
-
-#define XE_DG2_IDS(MACRO__, ...)		\
-	XE_DG2_G10_IDS(MACRO__, ## __VA_ARGS__),\
-	XE_DG2_G11_IDS(MACRO__, ## __VA_ARGS__),\
-	XE_DG2_G12_IDS(MACRO__, ## __VA_ARGS__)
-
-#define XE_ATS_M150_IDS(MACRO__, ...)		\
-	MACRO__(0x56C0, ## __VA_ARGS__),	\
-	MACRO__(0x56C2, ## __VA_ARGS__)
-
-#define XE_ATS_M75_IDS(MACRO__, ...)		\
-	MACRO__(0x56C1, ## __VA_ARGS__)
-
-#define XE_ATS_M_IDS(MACRO__, ...)		\
-	XE_ATS_M150_IDS(MACRO__, ## __VA_ARGS__),\
-	XE_ATS_M75_IDS(MACRO__, ## __VA_ARGS__)
-
-/* ARL */
-#define XE_ARL_IDS(MACRO__, ...)		\
-	MACRO__(0x7D41, ## __VA_ARGS__),	\
-	MACRO__(0x7D51, ## __VA_ARGS__),	\
-	MACRO__(0x7D67, ## __VA_ARGS__),	\
-	MACRO__(0x7DD1, ## __VA_ARGS__),	\
-	MACRO__(0xB640, ## __VA_ARGS__)
-
-/* MTL */
-#define XE_MTL_IDS(MACRO__, ...)		\
-	MACRO__(0x7D40, ## __VA_ARGS__),	\
-	MACRO__(0x7D45, ## __VA_ARGS__),	\
-	MACRO__(0x7D55, ## __VA_ARGS__),	\
-	MACRO__(0x7D60, ## __VA_ARGS__),	\
-	MACRO__(0x7DD5, ## __VA_ARGS__)
-
-/* PVC */
-#define XE_PVC_IDS(MACRO__, ...)		\
-	MACRO__(0x0B69, ## __VA_ARGS__),	\
-	MACRO__(0x0B6E, ## __VA_ARGS__),	\
-	MACRO__(0x0BD4, ## __VA_ARGS__),	\
-	MACRO__(0x0BD5, ## __VA_ARGS__),	\
-	MACRO__(0x0BD6, ## __VA_ARGS__),	\
-	MACRO__(0x0BD7, ## __VA_ARGS__),	\
-	MACRO__(0x0BD8, ## __VA_ARGS__),	\
-	MACRO__(0x0BD9, ## __VA_ARGS__),	\
-	MACRO__(0x0BDA, ## __VA_ARGS__),	\
-	MACRO__(0x0BDB, ## __VA_ARGS__),	\
-	MACRO__(0x0BE0, ## __VA_ARGS__),	\
-	MACRO__(0x0BE1, ## __VA_ARGS__),	\
-	MACRO__(0x0BE5, ## __VA_ARGS__)
-
-#define XE_LNL_IDS(MACRO__, ...)		\
-	MACRO__(0x6420, ## __VA_ARGS__),	\
-	MACRO__(0x64A0, ## __VA_ARGS__),	\
-	MACRO__(0x64B0, ## __VA_ARGS__)
-
-#define XE_BMG_IDS(MACRO__, ...)		\
-	MACRO__(0xE202, ## __VA_ARGS__),	\
-	MACRO__(0xE20B, ## __VA_ARGS__),	\
-	MACRO__(0xE20C, ## __VA_ARGS__),	\
-	MACRO__(0xE20D, ## __VA_ARGS__),	\
-	MACRO__(0xE212, ## __VA_ARGS__)
-
-#define XE_PTL_IDS(MACRO__, ...)		\
-	MACRO__(0xB080, ## __VA_ARGS__),	\
-	MACRO__(0xB081, ## __VA_ARGS__),	\
-	MACRO__(0xB082, ## __VA_ARGS__),	\
-	MACRO__(0xB090, ## __VA_ARGS__),	\
-	MACRO__(0xB091, ## __VA_ARGS__),	\
-	MACRO__(0xB092, ## __VA_ARGS__),	\
-	MACRO__(0xB0A0, ## __VA_ARGS__),	\
-	MACRO__(0xB0A1, ## __VA_ARGS__),	\
-	MACRO__(0xB0A2, ## __VA_ARGS__),	\
-	MACRO__(0xB0B0, ## __VA_ARGS__)
-
-#endif