linux-yocto/drivers/gpu/drm/xe/xe_exec_queue.h
Francois Dugast 0d92cd8935 drm/xe/exec_queue: Prepare last fence for hw engine group resume context
Ensure we can safely take a ref of the exec queue's last fence from the
context of resuming jobs from the hw engine group. The locking requirements
differ from the general case, hence the introduction of this new function.

v2: Add kernel doc, rework the code to prevent code duplication

v3: Fix kernel doc, remove now unnecessary lockdep variants (Matt Brost)

v4: Remove new put function (Matt Brost)

Signed-off-by: Francois Dugast <francois.dugast@intel.com>
Reviewed-by: Matthew Brost <matthew.brost@intel.com>
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20240809155156.1955925-7-francois.dugast@intel.com
2024-08-17 18:31:54 -07:00
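
Below is a minimal, hypothetical caller sketch (not the actual xe_hw_engine_group code) of how the new helper might be used on the resume path: take a reference to the queue's last fence with the resume-safe variant, wait for it to signal, then drop the reference. The helper name resume_wait_last_fence, the includes, and the error handling are illustrative assumptions, not part of the patch.

#include <linux/dma-fence.h>

#include "xe_exec_queue.h"

/* Hypothetical illustration only, not part of the patch. */
static int resume_wait_last_fence(struct xe_exec_queue *q, struct xe_vm *vm)
{
        struct dma_fence *fence;
        long err = 0;

        /* Variant safe to call from the hw engine group resume context */
        fence = xe_exec_queue_last_fence_get_for_resume(q, vm);

        /* Assumed non-NULL here, as with the other last-fence getters */
        if (!dma_fence_is_signaled(fence))
                err = dma_fence_wait(fence, false);

        dma_fence_put(fence);

        return err < 0 ? err : 0;
}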


/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2021 Intel Corporation
 */

#ifndef _XE_EXEC_QUEUE_H_
#define _XE_EXEC_QUEUE_H_

#include "xe_exec_queue_types.h"
#include "xe_vm_types.h"

struct drm_device;
struct drm_file;
struct xe_device;
struct xe_file;

struct xe_exec_queue *xe_exec_queue_create(struct xe_device *xe, struct xe_vm *vm,
                                           u32 logical_mask, u16 width,
                                           struct xe_hw_engine *hw_engine, u32 flags,
                                           u64 extensions);
struct xe_exec_queue *xe_exec_queue_create_class(struct xe_device *xe, struct xe_gt *gt,
                                                 struct xe_vm *vm,
                                                 enum xe_engine_class class,
                                                 u32 flags, u64 extensions);
struct xe_exec_queue *xe_exec_queue_create_bind(struct xe_device *xe,
                                                struct xe_tile *tile,
                                                u32 flags, u64 extensions);

void xe_exec_queue_fini(struct xe_exec_queue *q);
void xe_exec_queue_destroy(struct kref *ref);
void xe_exec_queue_assign_name(struct xe_exec_queue *q, u32 instance);
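
/*
 * Take a reference on @q only if its refcount has not already dropped to
 * zero; returns @q on success, NULL otherwise.
 */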
static inline struct xe_exec_queue *
xe_exec_queue_get_unless_zero(struct xe_exec_queue *q)
{
        if (kref_get_unless_zero(&q->refcount))
                return q;

        return NULL;
}

struct xe_exec_queue *xe_exec_queue_lookup(struct xe_file *xef, u32 id);
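
/*
 * Take an additional reference on @q; the caller must already hold one
 * (use xe_exec_queue_get_unless_zero() when that is not guaranteed).
 */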
static inline struct xe_exec_queue *xe_exec_queue_get(struct xe_exec_queue *q)
{
        kref_get(&q->refcount);
        return q;
}

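/*
 * Drop a reference on @q; xe_exec_queue_destroy() runs once the last
 * reference is released.
 */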
static inline void xe_exec_queue_put(struct xe_exec_queue *q)
{
        kref_put(&q->refcount, xe_exec_queue_destroy);
}

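/* A parallel exec queue spans more than one engine (width > 1). */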
static inline bool xe_exec_queue_is_parallel(struct xe_exec_queue *q)
{
        return q->width > 1;
}

bool xe_exec_queue_is_lr(struct xe_exec_queue *q);

bool xe_exec_queue_ring_full(struct xe_exec_queue *q);

bool xe_exec_queue_is_idle(struct xe_exec_queue *q);

void xe_exec_queue_kill(struct xe_exec_queue *q);

int xe_exec_queue_create_ioctl(struct drm_device *dev, void *data,
                               struct drm_file *file);
int xe_exec_queue_destroy_ioctl(struct drm_device *dev, void *data,
                                struct drm_file *file);
int xe_exec_queue_get_property_ioctl(struct drm_device *dev, void *data,
                                     struct drm_file *file);
enum xe_exec_queue_priority xe_exec_queue_device_get_max_priority(struct xe_device *xe);

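/*
 * Last-fence helpers. Per the commit above, the _get_for_resume() variant
 * exists because the locking requirements when resuming jobs from a hw
 * engine group differ from the general _get() case.
 */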
void xe_exec_queue_last_fence_put(struct xe_exec_queue *e, struct xe_vm *vm);
void xe_exec_queue_last_fence_put_unlocked(struct xe_exec_queue *e);
struct dma_fence *xe_exec_queue_last_fence_get(struct xe_exec_queue *e,
                                               struct xe_vm *vm);
struct dma_fence *xe_exec_queue_last_fence_get_for_resume(struct xe_exec_queue *e,
                                                          struct xe_vm *vm);
void xe_exec_queue_last_fence_set(struct xe_exec_queue *e, struct xe_vm *vm,
                                  struct dma_fence *fence);
int xe_exec_queue_last_fence_test_dep(struct xe_exec_queue *q,
                                      struct xe_vm *vm);

void xe_exec_queue_update_run_ticks(struct xe_exec_queue *q);

#endif