/*
 * NOTE(review): interior of the transmit helper; the function signature,
 * the declarations of ret/len/header, and the out_rc exit label live
 * outside this chunk — confirm against the full file.
 */
/* A TPM2 auth session cannot survive an interleaved raw command; end it. */
if (chip->flags & TPM_CHIP_FLAG_TPM2)
	tpm2_end_auth_session(chip);

ret = tpm2_prepare_space(chip, space, buf, bufsiz);
/*
 * If the command is not implemented by the TPM, synthesize a
 * response with a TPM2_RC_COMMAND_CODE return for user-space.
 */
if (ret == -EOPNOTSUPP) {
	/* Build a bare response header in place over the command buffer. */
	header->length = cpu_to_be32(sizeof(*header));
	header->tag = cpu_to_be16(TPM2_ST_NO_SESSIONS);
	header->return_code = cpu_to_be32(TPM2_RC_COMMAND_CODE |
					  TSS2_RESMGR_TPM_RC_LAYER);
	/*
	 * Positive ret (the synthesized response size) makes the check
	 * below take the out_rc path, returning the fake response.
	 */
	ret = sizeof(*header);
}
if (ret)
	goto out_rc;

len = tpm_transmit(chip, buf, bufsiz);
if (len < 0)
	ret = len;

/* Commit the space on success; on any failure flush it instead. */
if (!ret)
	ret = tpm2_commit_space(chip, space, buf, &len);
else
	tpm2_flush_space(chip);
/*
 * NOTE(review): body of the async-work handler; the function head
 * (presumably container_of() on the work item) is outside this chunk.
 */
mutex_lock(&priv->buffer_mutex);
priv->command_enqueued = false;
ret = tpm_try_get_ops(priv->chip);
if (ret) {
	/* Stash the error so the next read() reports it to user-space. */
	priv->response_length = ret;
	goto out;
}

ret = tpm_dev_transmit(priv->chip, priv->space, priv->data_buffer,
		       sizeof(priv->data_buffer));
tpm_put_ops(priv->chip);

/*
 * If ret is > 0 then tpm_dev_transmit returned the size of the
 * response. If ret is < 0 then tpm_dev_transmit failed and
 * returned an error code.
 */
if (ret != 0) {
	priv->response_length = ret;
	/* Discard the response if user-space does not read it in time. */
	mod_timer(&priv->user_read_timer, jiffies + (120 * HZ));
}
out:
mutex_unlock(&priv->buffer_mutex);
/* Wake any reader blocked waiting for the async response. */
wake_up_interruptible(&priv->async_wait);
}
/*
 * NOTE(review): interior of the write handler; the function head (where
 * buffer_mutex is taken and size is bounded) and the tail (success path,
 * out label, unlock) are outside this chunk.
 */
/*
 * Cannot perform a write until the read has cleared either via
 * tpm_read or a user_read_timer timeout. This also prevents split
 * buffered writes from blocking here.
 */
if ((!priv->response_read && priv->response_length) ||
    priv->command_enqueued) {
	ret = -EBUSY;
	goto out;
}

if (copy_from_user(priv->data_buffer, buf, size)) {
	ret = -EFAULT;
	goto out;
}

/*
 * Reject a write shorter than a TPM command header (6 bytes: tag +
 * length), or shorter than the length the header itself claims
 * (big-endian u32 at offset 2).
 */
if (size < 6 ||
    size < be32_to_cpu(*((__be32 *)(priv->data_buffer + 2)))) {
	ret = -EINVAL;
	goto out;
}

/*
 * If in nonblocking mode, schedule an async job to send the command
 * and return the size. In case of error the error code will be
 * returned in the subsequent read call.
 */
if (file->f_flags & O_NONBLOCK) {
	priv->command_enqueued = true;
	queue_work(tpm_dev_wq, &priv->async_work);
	mutex_unlock(&priv->buffer_mutex);
	return size;
}

/*
 * Atomic tpm command send and result receive. We only hold the ops
 * lock during this period so that the tpm can be unregistered even if
 * the char dev is held open.
 */
if (tpm_try_get_ops(priv->chip)) {
	ret = -EPIPE;
	goto out;
}

ret = tpm_dev_transmit(priv->chip, priv->space, priv->data_buffer,
		       sizeof(priv->data_buffer));
tpm_put_ops(priv->chip);
/*
 * NOTE(review): tail of the poll handler; the head (where buffer_mutex
 * is taken and mask is declared) is outside this chunk.
 */
/*
 * The response_length indicates if there is still a response (or part
 * of it) to be consumed. Partial reads decrease it by the number of
 * bytes read, and write resets it to zero.
 */
if (priv->response_length)
	mask = EPOLLIN | EPOLLRDNORM;	/* response pending: readable */
else
	mask = EPOLLOUT | EPOLLWRNORM;	/* buffer free: writable */

mutex_unlock(&priv->buffer_mutex);
return mask;
}
/**
 * tpm_common_release() - called on file close
 * @file: the /dev/tpm* file being released
 * @priv: per-open private state attached to @file
 *
 * Tears down the per-open state: waits for any in-flight async command
 * and timeout work to finish, stops the user-read timer, and clears the
 * pending response so nothing references @priv after close.
 */
void tpm_common_release(struct file *file, struct file_priv *priv)
{
	/* Let any queued async transmit finish before tearing down. */
	flush_work(&priv->async_work);
	/* Stop the response-expiry timer; sync so its handler is done. */
	timer_delete_sync(&priv->user_read_timer);
	flush_work(&priv->timeout_work);
	file->private_data = NULL;
	priv->response_length = 0;
}
/*
 * tpm_dev_common_init() - allocate the shared workqueue for async
 * /dev/tpm* command submission.
 *
 * NOTE(review): the rest of this function (presumably a NULL check on
 * tpm_dev_wq returning -ENOMEM, and the success return) is cut off in
 * this chunk — confirm against the full file.
 */
int __init tpm_dev_common_init(void)
{
	tpm_dev_wq = alloc_workqueue("tpm_dev_wq", WQ_MEM_RECLAIM, 0);
/*
 * NOTE(review): the following German website disclaimer is web-scrape
 * residue, not part of the driver source; preserved inside a comment so
 * the file remains compilable. (Translation: "The information on this
 * website was carefully compiled to the best of our knowledge. However,
 * no guarantee is given for the completeness, correctness, or quality of
 * the information provided. Remark: the syntax highlighting and the
 * measurement are still experimental.")
 *
 * Die Informationen auf dieser Webseite wurden nach bestem Wissen
 * sorgfaeltig zusammengestellt. Es wird jedoch weder Vollstaendigkeit,
 * noch Richtigkeit, noch Qualitaet der bereit gestellten Informationen
 * zugesichert.
 * Bemerkung: Die farbliche Syntaxdarstellung und die Messung sind noch
 * experimentell.
 */