linux-yocto/drivers/gpu/drm/xe/xe_hw_fence.c
Matthew Brost dd3e840a33
drm/xe: Drop HW fence pointer to HW fence ctx
The HW fence ctx objects are not ref counted; they are tied to the life of
an LRC object. HW fences reference the HW fence ctx, and HW fences can
outlive their LRC, resulting in a use-after-free (UAF). Drop the HW fence
pointer to the HW fence ctx and instead store what is needed directly in the HW fence.

v2:
 - Fix typo in commit (Ashutosh)
 - Use snprintf (Ashutosh)

Fixes: dd08ebf6c3 ("drm/xe: Introduce a new DRM driver for Intel GPUs")
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Ashutosh Dixit <ashutosh.dixit@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20240815193522.16008-1-matthew.brost@intel.com
(cherry picked from commit 60db6f540a)
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
2024-08-21 11:53:12 -04:00
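
In practice this means each fence now carries the device pointer, timeline name and seqno map that it previously reached through the context. A rough sketch of the resulting struct xe_hw_fence, inferred from the accessors in the code below (the real definition lives in xe_hw_fence_types.h, and the name-array length macro here is an assumption):

struct xe_hw_fence {
	struct dma_fence dma;		/* base fence; its lock points at the fence IRQ lock */
	struct xe_device *xe;		/* previously reached via ctx->gt */
	char name[MAX_FENCE_NAME_LEN];	/* timeline name copied from ctx->name (macro name assumed) */
	struct iosys_map seqno_map;	/* memory the hardware writes the seqno into */
	struct list_head irq_link;	/* entry on xe_hw_fence_irq::pending */
};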


// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */

#include "xe_hw_fence.h"

#include <linux/device.h>
#include <linux/slab.h>

#include "xe_bo.h"
#include "xe_device.h"
#include "xe_gt.h"
#include "xe_hw_engine.h"
#include "xe_macros.h"
#include "xe_map.h"
#include "xe_trace.h"

static struct kmem_cache *xe_hw_fence_slab;

int __init xe_hw_fence_module_init(void)
{
	xe_hw_fence_slab = kmem_cache_create("xe_hw_fence",
					     sizeof(struct xe_hw_fence), 0,
					     SLAB_HWCACHE_ALIGN, NULL);
	if (!xe_hw_fence_slab)
		return -ENOMEM;

	return 0;
}

void xe_hw_fence_module_exit(void)
{
	rcu_barrier();
	kmem_cache_destroy(xe_hw_fence_slab);
}

static struct xe_hw_fence *fence_alloc(void)
{
	return kmem_cache_zalloc(xe_hw_fence_slab, GFP_KERNEL);
}

static void fence_free(struct rcu_head *rcu)
{
	struct xe_hw_fence *fence =
		container_of(rcu, struct xe_hw_fence, dma.rcu);

	if (!WARN_ON_ONCE(!fence))
		kmem_cache_free(xe_hw_fence_slab, fence);
}
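
/*
 * irq_work callback: walk the pending list under the fence IRQ lock and
 * signal any fence whose seqno write has landed, dropping the reference
 * taken when signaling was enabled.
 */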
static void hw_fence_irq_run_cb(struct irq_work *work)
{
	struct xe_hw_fence_irq *irq = container_of(work, typeof(*irq), work);
	struct xe_hw_fence *fence, *next;
	bool tmp;

	tmp = dma_fence_begin_signalling();
	spin_lock(&irq->lock);
	if (irq->enabled) {
		list_for_each_entry_safe(fence, next, &irq->pending, irq_link) {
			struct dma_fence *dma_fence = &fence->dma;

			trace_xe_hw_fence_try_signal(fence);
			if (dma_fence_is_signaled_locked(dma_fence)) {
				trace_xe_hw_fence_signal(fence);
				list_del_init(&fence->irq_link);
				dma_fence_put(dma_fence);
			}
		}
	}
	spin_unlock(&irq->lock);
	dma_fence_end_signalling(tmp);
}

void xe_hw_fence_irq_init(struct xe_hw_fence_irq *irq)
{
	spin_lock_init(&irq->lock);
	init_irq_work(&irq->work, hw_fence_irq_run_cb);
	INIT_LIST_HEAD(&irq->pending);
	irq->enabled = true;
}
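
/*
 * Finish a fence IRQ: fences should not still be pending at this point, so
 * warn if any are, then signal them and drop their references so nothing is
 * leaked.
 */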
void xe_hw_fence_irq_finish(struct xe_hw_fence_irq *irq)
{
	struct xe_hw_fence *fence, *next;
	unsigned long flags;
	int err;
	bool tmp;

	if (XE_WARN_ON(!list_empty(&irq->pending))) {
		tmp = dma_fence_begin_signalling();
		spin_lock_irqsave(&irq->lock, flags);
		list_for_each_entry_safe(fence, next, &irq->pending, irq_link) {
			list_del_init(&fence->irq_link);
			err = dma_fence_signal_locked(&fence->dma);
			dma_fence_put(&fence->dma);
			XE_WARN_ON(err);
		}
		spin_unlock_irqrestore(&irq->lock, flags);
		dma_fence_end_signalling(tmp);
	}
}

void xe_hw_fence_irq_run(struct xe_hw_fence_irq *irq)
{
	irq_work_queue(&irq->work);
}
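
/*
 * Stop/start gate signaling from the irq_work handler: stop clears
 * irq->enabled so hw_fence_irq_run_cb() ignores the pending list, start
 * re-enables it and kicks the handler to catch up on any fence that
 * completed in the meantime.
 */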
void xe_hw_fence_irq_stop(struct xe_hw_fence_irq *irq)
{
	spin_lock_irq(&irq->lock);
	irq->enabled = false;
	spin_unlock_irq(&irq->lock);
}

void xe_hw_fence_irq_start(struct xe_hw_fence_irq *irq)
{
	spin_lock_irq(&irq->lock);
	irq->enabled = true;
	spin_unlock_irq(&irq->lock);

	irq_work_queue(&irq->work);
}

void xe_hw_fence_ctx_init(struct xe_hw_fence_ctx *ctx, struct xe_gt *gt,
			  struct xe_hw_fence_irq *irq, const char *name)
{
	ctx->gt = gt;
	ctx->irq = irq;
	ctx->dma_fence_ctx = dma_fence_context_alloc(1);
	ctx->next_seqno = XE_FENCE_INITIAL_SEQNO;
	snprintf(ctx->name, sizeof(ctx->name), "%s", name);
}

void xe_hw_fence_ctx_finish(struct xe_hw_fence_ctx *ctx)
{
}

static struct xe_hw_fence *to_xe_hw_fence(struct dma_fence *fence);

static struct xe_hw_fence_irq *xe_hw_fence_irq(struct xe_hw_fence *fence)
{
	return container_of(fence->dma.lock, struct xe_hw_fence_irq, lock);
}

static const char *xe_hw_fence_get_driver_name(struct dma_fence *dma_fence)
{
	struct xe_hw_fence *fence = to_xe_hw_fence(dma_fence);

	return dev_name(fence->xe->drm.dev);
}

static const char *xe_hw_fence_get_timeline_name(struct dma_fence *dma_fence)
{
	struct xe_hw_fence *fence = to_xe_hw_fence(dma_fence);

	return fence->name;
}
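
/*
 * A fence counts as signaled once an error has been set on it or the seqno
 * the hardware wrote back has caught up with the fence's own seqno.
 */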
static bool xe_hw_fence_signaled(struct dma_fence *dma_fence)
{
	struct xe_hw_fence *fence = to_xe_hw_fence(dma_fence);
	struct xe_device *xe = fence->xe;
	u32 seqno = xe_map_rd(xe, &fence->seqno_map, 0, u32);

	return dma_fence->error ||
		!__dma_fence_is_later(dma_fence->seqno, seqno, dma_fence->ops);
}

static bool xe_hw_fence_enable_signaling(struct dma_fence *dma_fence)
{
	struct xe_hw_fence *fence = to_xe_hw_fence(dma_fence);
	struct xe_hw_fence_irq *irq = xe_hw_fence_irq(fence);

	dma_fence_get(dma_fence);
	list_add_tail(&fence->irq_link, &irq->pending);

	/* SW completed (no HW IRQ) so kick handler to signal fence */
	if (xe_hw_fence_signaled(dma_fence))
		xe_hw_fence_irq_run(irq);

	return true;
}

static void xe_hw_fence_release(struct dma_fence *dma_fence)
{
	struct xe_hw_fence *fence = to_xe_hw_fence(dma_fence);

	XE_WARN_ON(!list_empty(&fence->irq_link));
	call_rcu(&dma_fence->rcu, fence_free);
}

static const struct dma_fence_ops xe_hw_fence_ops = {
	.get_driver_name = xe_hw_fence_get_driver_name,
	.get_timeline_name = xe_hw_fence_get_timeline_name,
	.enable_signaling = xe_hw_fence_enable_signaling,
	.signaled = xe_hw_fence_signaled,
	.release = xe_hw_fence_release,
};

static struct xe_hw_fence *to_xe_hw_fence(struct dma_fence *fence)
{
	if (XE_WARN_ON(fence->ops != &xe_hw_fence_ops))
		return NULL;

	return container_of(fence, struct xe_hw_fence, dma);
}

/**
 * xe_hw_fence_alloc() - Allocate an hw fence.
 *
 * Allocate but don't initialize an hw fence.
 *
 * Return: Pointer to the allocated fence or
 * negative error pointer on error.
 */
struct dma_fence *xe_hw_fence_alloc(void)
{
	struct xe_hw_fence *hw_fence = fence_alloc();

	if (!hw_fence)
		return ERR_PTR(-ENOMEM);

	return &hw_fence->dma;
}

/**
 * xe_hw_fence_free() - Free an hw fence.
 * @fence: Pointer to the fence to free.
 *
 * Frees an hw fence that hasn't yet been
 * initialized.
 */
void xe_hw_fence_free(struct dma_fence *fence)
{
	fence_free(&fence->rcu);
}

/**
 * xe_hw_fence_init() - Initialize an hw fence.
 * @fence: Pointer to the fence to initialize.
 * @ctx: Pointer to the struct xe_hw_fence_ctx fence context.
 * @seqno_map: Pointer to the map into where the seqno is blitted.
 *
 * Initializes a pre-allocated hw fence.
 * After initialization, the fence is subject to normal
 * dma-fence refcounting.
 */
void xe_hw_fence_init(struct dma_fence *fence, struct xe_hw_fence_ctx *ctx,
		      struct iosys_map seqno_map)
{
	struct xe_hw_fence *hw_fence =
		container_of(fence, typeof(*hw_fence), dma);

	hw_fence->xe = gt_to_xe(ctx->gt);
	snprintf(hw_fence->name, sizeof(hw_fence->name), "%s", ctx->name);
	hw_fence->seqno_map = seqno_map;
	INIT_LIST_HEAD(&hw_fence->irq_link);

	dma_fence_init(fence, &xe_hw_fence_ops, &ctx->irq->lock,
		       ctx->dma_fence_ctx, ctx->next_seqno++);

	trace_xe_hw_fence_create(hw_fence);
}
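
For orientation, a minimal, hypothetical caller sketch of the alloc/init pair above; the function name example_emit_fence and the assumption that the caller already holds a seqno map are illustrative, the real users live in the LRC and submission code:

struct dma_fence *example_emit_fence(struct xe_hw_fence_ctx *ctx,
				     struct iosys_map seqno_map)
{
	struct dma_fence *fence = xe_hw_fence_alloc();	/* may return ERR_PTR(-ENOMEM) */

	if (IS_ERR(fence))
		return fence;

	/* Bind the fence to the context's timeline and seqno memory */
	xe_hw_fence_init(fence, ctx, seqno_map);

	return fence;	/* from here on, normal dma_fence_get()/put() rules apply */
}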