linux-yocto/drivers/gpu/drm/xe/xe_guc_ct.h
Nirmoy Das eb523ec382 drm/xe/guc: Configure TLB timeout based on CT buffer size
GuC TLB invalidation depends on the GuC processing the request from the
CT queue and then the actual time needed to invalidate the TLB. Add a
function that returns an overestimate of the time a TLB invalidation
H2G request might take, which can be used as the timeout value when
waiting for a TLB invalidation to complete.

v4: Make sure CTB is in 4K blocks (Michal) and other doc fixes
v3: Pass CT to xe_guc_ct_queue_proc_time_jiffies() (Michal)
    Add tlb_timeout_jiffies() that replaces TLB_TIMEOUT (Michal)
v2: Address reviews from Michal.

Closes: https://gitlab.freedesktop.org/drm/xe/kernel/-/issues/1622
Cc: Matthew Brost <matthew.brost@intel.com>
Cc: Michal Wajdeczko <michal.wajdeczko@intel.com>
Suggested-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Acked-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Michal Wajdeczko <michal.wajdeczko@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20240628085845.2369-1-nirmoy.das@intel.com
Signed-off-by: Nirmoy Das <nirmoy.das@intel.com>
2024-07-01 17:38:48 +02:00
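For context, this header only declares xe_guc_ct_queue_proc_time_jiffies(); the derivation itself lives in xe_guc_ct.c. A minimal sketch of the idea, assuming the H2G buffer size constant (CTB_H2G_BUFFER_SIZE below is an illustrative stand-in) is a whole number of 4K blocks and that a full 4K block of queued commands takes roughly one second to drain:

#include <linux/align.h>
#include <linux/build_bug.h>
#include <linux/jiffies.h>
#include <linux/sizes.h>

/* Illustrative stand-in; the real constant is private to xe_guc_ct.c. */
#define CTB_H2G_BUFFER_SIZE	SZ_4K

long xe_guc_ct_queue_proc_time_jiffies(struct xe_guc_ct *ct)
{
	/* the estimate assumes the buffer is a whole number of 4K blocks */
	BUILD_BUG_ON(!IS_ALIGNED(CTB_H2G_BUFFER_SIZE, SZ_4K));

	/* overestimate: budget one second per 4K block of H2G commands */
	return (CTB_H2G_BUFFER_SIZE / SZ_4K) * HZ;
}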

/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2022 Intel Corporation
 */

#ifndef _XE_GUC_CT_H_
#define _XE_GUC_CT_H_

#include "xe_guc_ct_types.h"

struct drm_printer;
int xe_guc_ct_init(struct xe_guc_ct *ct);
int xe_guc_ct_enable(struct xe_guc_ct *ct);
void xe_guc_ct_disable(struct xe_guc_ct *ct);
void xe_guc_ct_stop(struct xe_guc_ct *ct);
void xe_guc_ct_fast_path(struct xe_guc_ct *ct);

struct xe_guc_ct_snapshot *
xe_guc_ct_snapshot_capture(struct xe_guc_ct *ct, bool atomic);
void xe_guc_ct_snapshot_print(struct xe_guc_ct_snapshot *snapshot,
			      struct drm_printer *p);
void xe_guc_ct_snapshot_free(struct xe_guc_ct_snapshot *snapshot);
void xe_guc_ct_print(struct xe_guc_ct *ct, struct drm_printer *p, bool atomic);

static inline bool xe_guc_ct_enabled(struct xe_guc_ct *ct)
{
	return ct->state == XE_GUC_CT_STATE_ENABLED;
}

static inline void xe_guc_ct_irq_handler(struct xe_guc_ct *ct)
{
	if (!xe_guc_ct_enabled(ct))
		return;

	wake_up_all(&ct->wq);
	queue_work(ct->g2h_wq, &ct->g2h_worker);
	xe_guc_ct_fast_path(ct);
}
/* Basic CT send / receives */
int xe_guc_ct_send(struct xe_guc_ct *ct, const u32 *action, u32 len,
		   u32 g2h_len, u32 num_g2h);
int xe_guc_ct_send_locked(struct xe_guc_ct *ct, const u32 *action, u32 len,
			  u32 g2h_len, u32 num_g2h);
int xe_guc_ct_send_recv(struct xe_guc_ct *ct, const u32 *action, u32 len,
			u32 *response_buffer);
static inline int
xe_guc_ct_send_block(struct xe_guc_ct *ct, const u32 *action, u32 len)
{
	return xe_guc_ct_send_recv(ct, action, len, NULL);
}

/* This is the only version of the CT send that can be called from a G2H handler */
int xe_guc_ct_send_g2h_handler(struct xe_guc_ct *ct, const u32 *action,
			       u32 len);

/* Can't fail because a GT reset is in progress */
int xe_guc_ct_send_recv_no_fail(struct xe_guc_ct *ct, const u32 *action,
				u32 len, u32 *response_buffer);
static inline int
xe_guc_ct_send_block_no_fail(struct xe_guc_ct *ct, const u32 *action, u32 len)
{
	return xe_guc_ct_send_recv_no_fail(ct, action, len, NULL);
}

long xe_guc_ct_queue_proc_time_jiffies(struct xe_guc_ct *ct);

#endif
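
The v3 note above mentions a tlb_timeout_jiffies() helper that replaces the old fixed TLB_TIMEOUT on the consumer side. A hedged sketch of how such a helper could combine a fixed hardware budget with the CT queue estimate; the gt->uc.guc.ct plumbing and the HZ / 4 constant are assumptions here, not taken from this header:

static long tlb_timeout_jiffies(struct xe_gt *gt)
{
	/* rough budget for HW/GuC to perform the invalidation itself */
	const long hw_tlb_timeout = HZ / 4;

	/* worst-case wait behind a full H2G CT queue (declared above) */
	long delay = xe_guc_ct_queue_proc_time_jiffies(&gt->uc.guc.ct);

	return hw_tlb_timeout + 2 * delay;
}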