/*
 * How much memory we allocate for each entry. This doesn't have to be a
 * single page, but it makes sense to keep it at least as multiples of
 * the page size.
 */
#define SHM_ENTRY_SIZE		PAGE_SIZE

/*
 * We need to have a compile time constant to be able to determine the
 * maximum needed size of the bit field.
 */
#define MIN_ARG_SIZE		OPTEE_MSG_GET_ARG_SIZE(MAX_ARG_PARAM_COUNT)
#define MAX_ARG_COUNT_PER_ENTRY	(SHM_ENTRY_SIZE / MIN_ARG_SIZE)

/*
 * Shared memory for argument structs are cached here. The number of
 * argument structs that can fit is determined at runtime depending on the
 * needed RPC parameter count reported by secure world
 * (optee->rpc_param_count).
 */
struct optee_shm_arg_entry {
	struct list_head list_node;	/* Link in optee->shm_arg_cache.shm_args */
	struct tee_shm *shm;		/* Backing shared-memory buffer */
	/* One bit per argument slot in @shm; set means the slot is in use */
	DECLARE_BITMAP(map, MAX_ARG_COUNT_PER_ENTRY);
};
/*
 * optee_cq_init() - Initialize a call queue
 * @cq:		call queue to initialize
 * @thread_count:	number of secure world threads, or 0 if unknown
 *
 * When @thread_count is 0 no local free-thread accounting is done;
 * instead we rely on secure world to tell us when it is out of threads
 * and we have to wait for another thread to become available.
 */
void optee_cq_init(struct optee_call_queue *cq, int thread_count)
{
	cq->total_thread_count = thread_count;
	cq->free_thread_count = thread_count;

	mutex_init(&cq->mutex);
	INIT_LIST_HEAD(&cq->waiters);
}
/*
 * NOTE(review): extraction artifact — the enclosing function's signature is
 * missing above this line. This reads like the body of an
 * optee_cq_wait_init()-style helper: it enqueues the waiter *w and decides
 * whether we must wait for a free secure-world thread. The declarations of
 * free_thread_threshold and need_wait are also not visible. Code below is
 * left token-identical; confirm against the original file.
 *
 * We're preparing to make a call to secure world. In case we can't
 * allocate a thread in secure world we'll end up waiting in
 * optee_cq_wait_for_completion().
 *
 * Normally if there's no contention in secure world the call will
 * complete and we can cleanup directly with optee_cq_wait_final().
 */
mutex_lock(&cq->mutex);

/*
 * We add ourselves to the queue, but we don't wait. This guarantees
 * that we don't lose a completion if secure world returns busy and
 * another thread just exited and try to complete someone.
 */
init_completion(&w->c);
list_add_tail(&w->list_node, &cq->waiters);
w->sys_thread = sys_thread;

/*
 * With thread accounting enabled (total_thread_count != 0): a normal
 * session leaves one thread spare while system sessions are registered
 * (sys_thread_req_count != 0); a system-session caller may always take
 * the last free thread. If none can be taken we must wait.
 */
if (cq->total_thread_count) { if (sys_thread || !cq->sys_thread_req_count)
	free_thread_threshold = 0; else
	free_thread_threshold = 1;

	if (cq->free_thread_count > free_thread_threshold)
		cq->free_thread_count--; else
		need_wait = true;
}
mutex_unlock(&cq->mutex);

while (need_wait) {
	optee_cq_wait_for_completion(cq, w);
	mutex_lock(&cq->mutex);

	/* Move to end of list to get out of the way for other waiters */
	list_del(&w->list_node);
	reinit_completion(&w->c);
	list_add_tail(&w->list_node, &cq->waiters);

	/*
	 * NOTE(review): extraction artifact — the rest of this while-loop
	 * (re-checking the free-thread count) and the function's closing
	 * brace are missing; the lines below appear to belong to a
	 * separate waiter wake-up helper whose signature is also missing.
	 *
	 * Wake a waiting system session if any, prior to a normal session.
	 */
	list_for_each_entry(w, &cq->waiters, list_node) { if (w->sys_thread && !completion_done(&w->c)) {
		complete(&w->c); return;
	}
}
/*
 * optee_cq_wait_final() - Finish a call-queue wait after returning from
 * secure world.
 * @cq:	call queue the waiter was registered on
 * @w:	this caller's waiter, registered earlier on @cq
 */
void optee_cq_wait_final(struct optee_call_queue *cq, struct optee_call_waiter *w)
{
	/*
	 * We're done with the call to secure world. The thread in secure
	 * world that was used for this call is now available for some
	 * other task to use.
	 */
	mutex_lock(&cq->mutex);

	/* Get out of the list */
	list_del(&w->list_node);

	/* The secure-world thread we were holding is free again */
	cq->free_thread_count++;

	/* Wake up one eventual waiting task */
	optee_cq_complete_one(cq);

	/*
	 * If we're completed we've got a completion from another task that
	 * was just done with its call to secure world. Since yet another
	 * thread now is available in secure world wake up another eventual
	 * waiting task.
	 */
	if (completion_done(&w->c))
		optee_cq_complete_one(cq);

	mutex_unlock(&cq->mutex);
}
/* Count registered system sessions to reserved a system thread or not */ staticbool optee_cq_incr_sys_thread_count(struct optee_call_queue *cq)
{ if (cq->total_thread_count <= 1) returnfalse;
/*
 * Drop one system-session reservation request; counterpart of
 * optee_cq_incr_sys_thread_count().
 */
static void optee_cq_decr_sys_thread_count(struct optee_call_queue *cq)
{
	mutex_lock(&cq->mutex);
	cq->sys_thread_req_count--;
	/* If there's someone waiting, let it resume */
	optee_cq_complete_one(cq);
	mutex_unlock(&cq->mutex);
}
/* Requires the filpstate mutex to be held */ staticstruct optee_session *find_session(struct optee_context_data *ctxdata,
u32 session_id)
{ struct optee_session *sess;
list_for_each_entry(sess, &ctxdata->sess_list, list_node) if (sess->session_id == session_id) return sess;
	/*
	 * NOTE(review): extraction artifact — this is the tail of a shared
	 * memory argument allocator (optee_get_msg_arg()-style): its
	 * signature and the declarations/initialization of optee, entry,
	 * res, ma, sz, args_per_entry, bit and offs are missing above this
	 * line. Code is left token-identical; confirm against the original
	 * file.
	 */
	mutex_lock(&optee->shm_arg_cache.mutex);
	/* Look for a cached entry with a free argument slot */
	list_for_each_entry(entry, &optee->shm_arg_cache.shm_args, list_node) {
		bit = find_first_zero_bit(entry->map, MAX_ARG_COUNT_PER_ENTRY);
		if (bit < args_per_entry)
			goto have_entry;
	}

	/*
	 * No entry was found, let's allocate a new one.
	 */
	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry) {
		res = ERR_PTR(-ENOMEM);
		goto out;
	}

	if (optee->shm_arg_cache.flags & OPTEE_SHM_ARG_ALLOC_PRIV)
		res = tee_shm_alloc_priv_buf(ctx, SHM_ENTRY_SIZE);
	else
		res = tee_shm_alloc_kernel_buf(ctx, SHM_ENTRY_SIZE);
	if (IS_ERR(res)) {
		kfree(entry);
		goto out;
	}
	entry->shm = res;
	list_add(&entry->list_node, &optee->shm_arg_cache.shm_args);
	bit = 0;

have_entry:
	offs = bit * sz;
	res = tee_shm_get_va(entry->shm, offs);
	if (IS_ERR(res))
		goto out;
	ma = res;
	/* Claim the slot and hand back a zeroed argument struct */
	set_bit(bit, entry->map);
	memset(ma, 0, sz);
	ma->num_params = num_params;
	*entry_ret = entry;
	*shm_ret = entry->shm;
	*offs_ret = offs;
out:
	mutex_unlock(&optee->shm_arg_cache.mutex);
	return res;
}
/**
 * optee_free_msg_arg() - Free previously obtained shared memory
 * @ctx:	Caller TEE context
 * @entry:	Pointer returned when the shared memory was obtained
 * @offs:	Offset of shared memory buffer to free
 *
 * This function frees the shared memory obtained with optee_get_msg_arg().
 */
void optee_free_msg_arg(struct tee_context *ctx,
			struct optee_shm_arg_entry *entry, u_int offs)
{
	struct optee *optee = tee_get_drvdata(ctx->teedev);
	size_t sz = optee_msg_arg_size(optee->rpc_param_count);
	u_long bit;

	/* Reject offsets outside the entry or not aligned to a slot */
	if (offs > SHM_ENTRY_SIZE || offs % sz) {
		pr_err("Invalid offs %u\n", offs);
		return;
	}
	bit = offs / sz;

	mutex_lock(&optee->shm_arg_cache.mutex);

	if (!test_bit(bit, entry->map))
		pr_err("Bit pos %lu is already free\n", bit);
	clear_bit(bit, entry->map);

	/*
	 * NOTE(review): the original text was truncated after clear_bit(),
	 * leaving the cache mutex held forever; the unlock and closing
	 * brace below are reconstructed — confirm against the original
	 * file.
	 */
	mutex_unlock(&optee->shm_arg_cache.mutex);
}
	/*
	 * NOTE(review): extraction artifact — the lines below are the
	 * interiors of several different functions whose signatures are
	 * missing (they read like the tails of an open-session path, a
	 * close-session path, two session-lookup paths and a VMA
	 * memory-type check). Code is left token-identical; confirm
	 * against the original file.
	 */
	if (msg_arg->ret == TEEC_SUCCESS) {
		/* A new session has been created, add it to the list. */
		sess->session_id = msg_arg->session;
		mutex_lock(&ctxdata->mutex);
		list_add(&sess->list_node, &ctxdata->sess_list);
		mutex_unlock(&ctxdata->mutex);
	} else {
		kfree(sess);
	}

	/* Check that the session is valid and remove it from the list */
	mutex_lock(&ctxdata->mutex);
	sess = find_session(ctxdata, session);
	if (sess)
		list_del(&sess->list_node);
	mutex_unlock(&ctxdata->mutex);
	if (!sess)
		return -EINVAL;
	system_thread = sess->use_sys_thread;
	kfree(sess);

	/* Check that the session is valid */
	mutex_lock(&ctxdata->mutex);
	sess = find_session(ctxdata, arg->session);
	if (sess)
		system_thread = sess->use_sys_thread;
	mutex_unlock(&ctxdata->mutex);
	if (!sess)
		return -EINVAL;

	/* Check that the session is valid */
	mutex_lock(&ctxdata->mutex);
	sess = find_session(ctxdata, session);
	if (sess)
		system_thread = sess->use_sys_thread;
	mutex_unlock(&ctxdata->mutex);
	if (!sess)
		return -EINVAL;

	/* Reject any VMA that is not mapped as normal cached memory */
	for_each_vma_range(vmi, vma, end) {
		if (!is_normal_memory(vma->vm_page_prot))
			return -EINVAL;
	}

	return 0;
}
/*
 * Check that the pages backing [start, start + num_pages) are of normal
 * memory type and hence suitable to register with OP-TEE.
 *
 * Returns 0 on success, -EINVAL otherwise.
 */
int optee_check_mem_type(unsigned long start, size_t num_pages)
{
	struct mm_struct *mm = current->mm;
	int rc;

	/*
	 * Allow kernel address to register with OP-TEE as kernel
	 * pages are configured as normal memory only.
	 */
	if (virt_addr_valid((void *)start) || is_vmalloc_addr((void *)start))
		return 0;

	/*
	 * NOTE(review): the original text fused "unsigned long" and was
	 * truncated after the kernel-address check, leaving rc unused and
	 * the non-void function falling off the end; the mmap-locked
	 * user-VMA check below is reconstructed — confirm against the
	 * original file.
	 */
	mmap_read_lock(mm);
	rc = check_mem_type(start, num_pages);
	mmap_read_unlock(mm);

	return rc;
}
/*
 * NOTE(review): the German text below is website boilerplate captured
 * together with the code listing, not part of the source file (it is a
 * disclaimer: "the information on this web page was compiled carefully to
 * the best of our knowledge; however neither completeness, correctness nor
 * quality of the provided information is guaranteed. Note: the colored
 * syntax rendering and the measurement are still experimental"). It is
 * preserved here inside a comment so it no longer breaks compilation;
 * it should be removed entirely.
 *
 * Die Informationen auf dieser Webseite wurden
 * nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder
 * Vollständigkeit, noch Richtigkeit,
 * noch Qualität der bereit gestellten Informationen zugesichert.
 * Bemerkung:
 * Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.
 */