Mirror of git://git.yoctoproject.org/linux-yocto.git
tee: optee: support tracking system threads
Adds support in the OP-TEE driver to keep track of reserved system
threads. The logic allows one OP-TEE thread to be reserved for TEE system
sessions. The optee_cq_*() functions are updated to handle this when it is
enabled, that is, when the TEE reports how many thread contexts it supports
and when at least one session has registered as a system session (using
tee_client_system_session()).

For the sake of simplicity, initialization of the call queue management is
factored out into a new helper function, optee_cq_init().

The SMC ABI part of the driver enables this tracking, but the FF-A ABI
part does not.

Co-developed-by: Jens Wiklander <jens.wiklander@linaro.org>
Co-developed-by: Sumit Garg <sumit.garg@linaro.org>
Signed-off-by: Sumit Garg <sumit.garg@linaro.org>
Signed-off-by: Etienne Carriere <etienne.carriere@foss.st.com>
Signed-off-by: Jens Wiklander <jens.wiklander@linaro.org>
Commit: 45bc2c9b5b (parent: a9214a8883)
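Before the diff, a note on usage: a kernel TEE client opts a session into system-thread handling through the tee_client_system_session() helper referenced in the commit message (added by the companion "tee: system session" change, not shown in this diff). The sketch below is a minimal illustration under that assumption; the example_open_system_session() wrapper and its error handling are hypothetical and not part of this commit.

#include <linux/tee_drv.h>

/*
 * Minimal sketch (assumption: tee_client_system_session() as provided by
 * the companion "tee: system session" patch): open a session and flag it
 * as a system session so OP-TEE keeps a thread reserved for it.
 */
static int example_open_system_session(struct tee_context *ctx,
					struct tee_ioctl_open_session_arg *arg)
{
	int rc;

	rc = tee_client_open_session(ctx, arg, NULL);
	if (rc || arg->ret)
		return rc ? rc : -EINVAL;

	/* Register as a system session; the driver then reserves a thread */
	tee_client_system_session(ctx, arg->session);

	return 0;
}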
--- a/drivers/tee/optee/call.c
+++ b/drivers/tee/optee/call.c
@@ -39,9 +39,29 @@ struct optee_shm_arg_entry {
 	DECLARE_BITMAP(map, MAX_ARG_COUNT_PER_ENTRY);
 };
 
+void optee_cq_init(struct optee_call_queue *cq, int thread_count)
+{
+	mutex_init(&cq->mutex);
+	INIT_LIST_HEAD(&cq->waiters);
+
+	/*
+	 * If cq->total_thread_count is 0 then we're not trying to keep
+	 * track of how many free threads we have, instead we're relying on
+	 * the secure world to tell us when we're out of thread and have to
+	 * wait for another thread to become available.
+	 */
+	cq->total_thread_count = thread_count;
+	cq->free_thread_count = thread_count;
+}
+
 void optee_cq_wait_init(struct optee_call_queue *cq,
 			struct optee_call_waiter *w, bool sys_thread)
 {
+	unsigned int free_thread_threshold;
+	bool need_wait = false;
+
+	memset(w, 0, sizeof(*w));
+
 	/*
 	 * We're preparing to make a call to secure world. In case we can't
 	 * allocate a thread in secure world we'll end up waiting in
@@ -60,8 +80,38 @@ void optee_cq_wait_init(struct optee_call_queue *cq,
 	 */
 	init_completion(&w->c);
 	list_add_tail(&w->list_node, &cq->waiters);
+	w->sys_thread = sys_thread;
+
+	if (cq->total_thread_count) {
+		if (sys_thread || !cq->sys_thread_req_count)
+			free_thread_threshold = 0;
+		else
+			free_thread_threshold = 1;
+
+		if (cq->free_thread_count > free_thread_threshold)
+			cq->free_thread_count--;
+		else
+			need_wait = true;
+	}
+
 	mutex_unlock(&cq->mutex);
+
+	while (need_wait) {
+		optee_cq_wait_for_completion(cq, w);
+		mutex_lock(&cq->mutex);
+
+		if (sys_thread || !cq->sys_thread_req_count)
+			free_thread_threshold = 0;
+		else
+			free_thread_threshold = 1;
+
+		if (cq->free_thread_count > free_thread_threshold) {
+			cq->free_thread_count--;
+			need_wait = false;
+		}
+
+		mutex_unlock(&cq->mutex);
+	}
 }
 
 void optee_cq_wait_for_completion(struct optee_call_queue *cq,
@@ -83,6 +133,14 @@ static void optee_cq_complete_one(struct optee_call_queue *cq)
 {
 	struct optee_call_waiter *w;
 
+	/* Wake a waiting system session if any, prior to a normal session */
+	list_for_each_entry(w, &cq->waiters, list_node) {
+		if (w->sys_thread && !completion_done(&w->c)) {
+			complete(&w->c);
+			return;
+		}
+	}
+
 	list_for_each_entry(w, &cq->waiters, list_node) {
 		if (!completion_done(&w->c)) {
 			complete(&w->c);
@@ -104,6 +162,8 @@ void optee_cq_wait_final(struct optee_call_queue *cq,
 	/* Get out of the list */
 	list_del(&w->list_node);
 
+	cq->free_thread_count++;
+
 	/* Wake up one eventual waiting task */
 	optee_cq_complete_one(cq);
 
@@ -119,6 +179,28 @@ void optee_cq_wait_final(struct optee_call_queue *cq,
 	mutex_unlock(&cq->mutex);
 }
 
+/* Count registered system sessions to reserved a system thread or not */
+static bool optee_cq_incr_sys_thread_count(struct optee_call_queue *cq)
+{
+	if (cq->total_thread_count <= 1)
+		return false;
+
+	mutex_lock(&cq->mutex);
+	cq->sys_thread_req_count++;
+	mutex_unlock(&cq->mutex);
+
+	return true;
+}
+
+static void optee_cq_decr_sys_thread_count(struct optee_call_queue *cq)
+{
+	mutex_lock(&cq->mutex);
+	cq->sys_thread_req_count--;
+	/* If there's someone waiting, let it resume */
+	optee_cq_complete_one(cq);
+	mutex_unlock(&cq->mutex);
+}
+
 /* Requires the filpstate mutex to be held */
 static struct optee_session *find_session(struct optee_context_data *ctxdata,
 					  u32 session_id)
@@ -361,6 +443,27 @@ out:
 	return rc;
 }
 
+int optee_system_session(struct tee_context *ctx, u32 session)
+{
+	struct optee *optee = tee_get_drvdata(ctx->teedev);
+	struct optee_context_data *ctxdata = ctx->data;
+	struct optee_session *sess;
+	int rc = -EINVAL;
+
+	mutex_lock(&ctxdata->mutex);
+
+	sess = find_session(ctxdata, session);
+	if (sess && (sess->use_sys_thread ||
+		     optee_cq_incr_sys_thread_count(&optee->call_queue))) {
+		sess->use_sys_thread = true;
+		rc = 0;
+	}
+
+	mutex_unlock(&ctxdata->mutex);
+
+	return rc;
+}
+
 int optee_close_session_helper(struct tee_context *ctx, u32 session,
 			       bool system_thread)
 {
@@ -380,6 +483,9 @@ int optee_close_session_helper(struct tee_context *ctx, u32 session,
 
 	optee_free_msg_arg(ctx, entry, offs);
 
+	if (system_thread)
+		optee_cq_decr_sys_thread_count(&optee->call_queue);
+
 	return 0;
 }
 
--- a/drivers/tee/optee/ffa_abi.c
+++ b/drivers/tee/optee/ffa_abi.c
@@ -853,8 +853,7 @@ static int optee_ffa_probe(struct ffa_device *ffa_dev)
 	if (rc)
 		goto err_unreg_supp_teedev;
 	mutex_init(&optee->ffa.mutex);
-	mutex_init(&optee->call_queue.mutex);
-	INIT_LIST_HEAD(&optee->call_queue.waiters);
+	optee_cq_init(&optee->call_queue, 0);
 	optee_supp_init(&optee->supp);
 	optee_shm_arg_cache_init(optee, arg_cache_flags);
 	ffa_dev_set_drvdata(ffa_dev, optee);
--- a/drivers/tee/optee/optee_private.h
+++ b/drivers/tee/optee/optee_private.h
@@ -40,15 +40,33 @@ typedef void (optee_invoke_fn)(unsigned long, unsigned long, unsigned long,
 			       unsigned long, unsigned long,
 			       struct arm_smccc_res *);
 
+/*
+ * struct optee_call_waiter - TEE entry may need to wait for a free TEE thread
+ * @list_node		Reference in waiters list
+ * @c			Waiting completion reference
+ * @sys_thread		True if waiter belongs to a system thread
+ */
 struct optee_call_waiter {
 	struct list_head list_node;
 	struct completion c;
+	bool sys_thread;
 };
 
+/*
+ * struct optee_call_queue - OP-TEE call queue management
+ * @mutex			Serializes access to this struct
+ * @waiters			List of threads waiting to enter OP-TEE
+ * @total_thread_count		Overall number of thread context in OP-TEE or 0
+ * @free_thread_count		Number of threads context free in OP-TEE
+ * @sys_thread_req_count	Number of registered system thread sessions
+ */
 struct optee_call_queue {
 	/* Serializes access to this struct */
 	struct mutex mutex;
 	struct list_head waiters;
+	int total_thread_count;
+	int free_thread_count;
+	int sys_thread_req_count;
 };
 
 struct optee_notif {
@@ -252,6 +270,7 @@ int optee_supp_send(struct tee_context *ctx, u32 ret, u32 num_params,
 int optee_open_session(struct tee_context *ctx,
 		       struct tee_ioctl_open_session_arg *arg,
 		       struct tee_param *param);
+int optee_system_session(struct tee_context *ctx, u32 session);
 int optee_close_session_helper(struct tee_context *ctx, u32 session,
 			       bool system_thread);
 int optee_close_session(struct tee_context *ctx, u32 session);
@@ -301,6 +320,7 @@ static inline void optee_to_msg_param_value(struct optee_msg_param *mp,
 	mp->u.value.c = p->u.value.c;
 }
 
+void optee_cq_init(struct optee_call_queue *cq, int thread_count);
 void optee_cq_wait_init(struct optee_call_queue *cq,
 			struct optee_call_waiter *w, bool sys_thread);
 void optee_cq_wait_for_completion(struct optee_call_queue *cq,
--- a/drivers/tee/optee/smc_abi.c
+++ b/drivers/tee/optee/smc_abi.c
@@ -1212,6 +1212,7 @@ static const struct tee_driver_ops optee_clnt_ops = {
 	.release = optee_release,
 	.open_session = optee_open_session,
 	.close_session = optee_close_session,
+	.system_session = optee_system_session,
 	.invoke_func = optee_invoke_func,
 	.cancel_req = optee_cancel_req,
 	.shm_register = optee_shm_register,
@@ -1359,6 +1360,16 @@ static bool optee_msg_exchange_capabilities(optee_invoke_fn *invoke_fn,
 	return true;
 }
 
+static unsigned int optee_msg_get_thread_count(optee_invoke_fn *invoke_fn)
+{
+	struct arm_smccc_res res;
+
+	invoke_fn(OPTEE_SMC_GET_THREAD_COUNT, 0, 0, 0, 0, 0, 0, 0, &res);
+	if (res.a0)
+		return 0;
+	return res.a1;
+}
+
 static struct tee_shm_pool *
 optee_config_shm_memremap(optee_invoke_fn *invoke_fn, void **memremaped_shm)
 {
@@ -1611,6 +1622,7 @@ static int optee_probe(struct platform_device *pdev)
 	struct optee *optee = NULL;
 	void *memremaped_shm = NULL;
 	unsigned int rpc_param_count;
+	unsigned int thread_count;
 	struct tee_device *teedev;
 	struct tee_context *ctx;
 	u32 max_notif_value;
@@ -1638,6 +1650,7 @@ static int optee_probe(struct platform_device *pdev)
 		return -EINVAL;
 	}
 
+	thread_count = optee_msg_get_thread_count(invoke_fn);
 	if (!optee_msg_exchange_capabilities(invoke_fn, &sec_caps,
 					     &max_notif_value,
 					     &rpc_param_count)) {
@@ -1727,8 +1740,7 @@ static int optee_probe(struct platform_device *pdev)
 	if (rc)
 		goto err_unreg_supp_teedev;
 
-	mutex_init(&optee->call_queue.mutex);
-	INIT_LIST_HEAD(&optee->call_queue.waiters);
+	optee_cq_init(&optee->call_queue, thread_count);
 	optee_supp_init(&optee->supp);
 	optee->smc.memremaped_shm = memremaped_shm;
 	optee->pool = pool;
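To recap the reservation rule implemented in optee_cq_wait_init() above: once OP-TEE has reported its thread count and at least one system session is registered, a normal session only takes a thread while more than one is free, so the last free thread stays available to system sessions. The standalone sketch below restates that decision with hypothetical names (cq_state, may_enter); it is an illustration, not driver code.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for the optee_call_queue counters used above. */
struct cq_state {
	int total_thread_count;   /* 0 means thread tracking is disabled */
	int free_thread_count;
	int sys_thread_req_count; /* number of registered system sessions */
};

/* Returns true if the caller may enter OP-TEE now, false if it must wait. */
static bool may_enter(struct cq_state *cq, bool sys_thread)
{
	unsigned int free_thread_threshold;

	if (!cq->total_thread_count)
		return true; /* rely on secure world to report thread exhaustion */

	/* Normal callers keep one thread in reserve for system sessions. */
	if (sys_thread || !cq->sys_thread_req_count)
		free_thread_threshold = 0;
	else
		free_thread_threshold = 1;

	if (cq->free_thread_count > free_thread_threshold) {
		cq->free_thread_count--;
		return true;
	}
	return false;
}

int main(void)
{
	struct cq_state cq = {
		.total_thread_count = 2,
		.free_thread_count = 1,
		.sys_thread_req_count = 1,
	};

	printf("normal session may enter: %d\n", may_enter(&cq, false)); /* 0: must wait */
	printf("system session may enter: %d\n", may_enter(&cq, true));  /* 1: may enter */
	return 0;
}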