rc = ibmvtpm_crq_send_init(ibmvtpm); if (rc)
dev_err(dev, "Error send_init rc=%d\n", rc);
return rc;
}
/**
 * tpm_ibmvtpm_send() - Send a TPM command
 * @chip:	tpm chip struct
 * @buf:	buffer contains data to send
 * @bufsiz:	size of the buffer
 * @count:	length of the command
 *
 * Return:
 *	0 on success,
 *	-errno on error
 */
static int tpm_ibmvtpm_send(struct tpm_chip *chip, u8 *buf, size_t bufsiz,
			    size_t count)
{
	struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev);
	bool retry = true;
	int rc, sig;

	if (!ibmvtpm->rtce_buf) {
		dev_err(ibmvtpm->dev, "ibmvtpm device is not ready\n");
		return 0;
	}

	if (count > ibmvtpm->rtce_size) {
		dev_err(ibmvtpm->dev,
			"Invalid size in send: count=%zd, rtce_size=%d\n",
			count, ibmvtpm->rtce_size);
		return -EIO;
	}

	if (ibmvtpm->tpm_processing_cmd) {
		dev_info(ibmvtpm->dev, "Need to wait for TPM to finish\n");
		/* wait for previous command to finish */
		sig = wait_event_interruptible(ibmvtpm->wq,
					       !ibmvtpm->tpm_processing_cmd);
		if (sig)
			return -EINTR;
	}

	/*
	 * Stage the command in the DMA-mapped request buffer; the CRQ send
	 * below passes only rtce_dma_handle, so the hypervisor never sees
	 * buf itself. (Fix: the original block never copied buf, leaving
	 * the parameter unused and the command payload never transmitted.)
	 */
	memcpy((void *)ibmvtpm->rtce_buf, (void *)buf, count);

	/*
	 * set the processing flag before the Hcall, since we may get the
	 * result (interrupt) before even being able to check rc.
	 */
	ibmvtpm->tpm_processing_cmd = 1;

again:
	rc = ibmvtpm_send_crq(ibmvtpm->vdev,
			      IBMVTPM_VALID_CMD, VTPM_TPM_COMMAND,
			      count, ibmvtpm->rtce_dma_handle);
	if (rc != H_SUCCESS) {
		/*
		 * H_CLOSED can be returned after LPM resume. Call
		 * tpm_ibmvtpm_resume() to re-enable the CRQ then retry
		 * ibmvtpm_send_crq() once before failing.
		 */
		if (rc == H_CLOSED && retry) {
			tpm_ibmvtpm_resume(ibmvtpm->dev);
			retry = false;
			goto again;
		}
		dev_err(ibmvtpm->dev, "tpm_ibmvtpm_send failed rc=%d\n", rc);
		ibmvtpm->tpm_processing_cmd = 0;
	}

	/* Fix: original block was truncated — no return / closing brace. */
	return 0;
}
/** * ibmvtpm_crq_get_version - Send a CRQ request to get vtpm version * - Note that this is vtpm version and not tpm version * * @ibmvtpm: vtpm device struct * * Return: * 0 on success. * Non-zero on failure.
*/ staticint ibmvtpm_crq_get_version(struct ibmvtpm_dev *ibmvtpm)
{ int rc;
if (ibmvtpm->rtce_buf) {
dma_unmap_single(ibmvtpm->dev, ibmvtpm->rtce_dma_handle,
ibmvtpm->rtce_size, DMA_BIDIRECTIONAL);
kfree(ibmvtpm->rtce_buf);
}
kfree(ibmvtpm); /* For tpm_ibmvtpm_get_desired_dma */
dev_set_drvdata(&vdev->dev, NULL);
}
/** * tpm_ibmvtpm_get_desired_dma - Get DMA size needed by this driver * @vdev: vio device struct * * Return: * Number of bytes the driver needs to DMA map.
*/ staticunsignedlong tpm_ibmvtpm_get_desired_dma(struct vio_dev *vdev)
{ struct tpm_chip *chip = dev_get_drvdata(&vdev->dev); struct ibmvtpm_dev *ibmvtpm;
/* * ibmvtpm initializes at probe time, so the data we are * asking for may not be set yet. Estimate that 4K required * for TCE-mapped buffer in addition to CRQ.
*/ if (chip)
ibmvtpm = dev_get_drvdata(&chip->dev); else return CRQ_RES_BUF_SIZE + PAGE_SIZE;
/*
 * NOTE(review): orphaned fragment. This span is the interior of the CRQ
 * response dispatcher (switch on the response message type); the function
 * head, the enclosing switch statement, and the case label for the
 * RTCE-buffer-size response are missing from this chunk and must be
 * restored from the original file. Code below is left byte-identical.
 */
/* Tail of the RTCE-buffer-size response: if DMA-mapping the freshly
 * allocated rtce buffer failed, free it and clear the pointer so the
 * driver stays in the "not ready" state (see the !rtce_buf check in
 * tpm_ibmvtpm_send()).
 */
if (dma_mapping_error(ibmvtpm->dev,
		      ibmvtpm->rtce_dma_handle)) {
	kfree(ibmvtpm->rtce_buf);
	ibmvtpm->rtce_buf = NULL;
	dev_err(ibmvtpm->dev, "Failed to dma map rtce buffer\n");
}
return;
case VTPM_GET_VERSION_RES:
	/* Record the vtpm (not tpm) version reported in the response. */
	ibmvtpm->vtpm_version = be32_to_cpu(crq->data);
	return;
case VTPM_TPM_COMMAND_RES:
	/* len of the data in rtce buffer */
	ibmvtpm->res_len = be16_to_cpu(crq->len);
	/* Command finished: clear the in-flight flag and wake any sender
	 * sleeping on ibmvtpm->wq (see tpm_ibmvtpm_send()).
	 */
	ibmvtpm->tpm_processing_cmd = 0;
	wake_up_interruptible(&ibmvtpm->wq);
	return;
default:
	return;
}
/* NOTE(review): closes the outer switch (head not visible here). */
}
return;
}
/*
 * NOTE(review): this span is the body of the CRQ interrupt handler (it
 * returns IRQ_HANDLED); the function signature and the declarations of
 * "crq" and "ibmvtpm" are missing from this chunk — restore them from
 * the original file. Code below is left byte-identical.
 *
 * while loop is needed for initial setup (get version and
 * get rtce_size). There should be only one tpm request at any
 * given time.
 */
while ((crq = ibmvtpm_crq_get_next(ibmvtpm)) != NULL) {
	ibmvtpm_crq_process(crq, ibmvtpm);
	wake_up_interruptible(&ibmvtpm->crq_queue.wq);
	/* Release the CRQ slot after processing; the smp_wmb() orders the
	 * valid=0 store — NOTE(review): presumably pairs with the reader
	 * in ibmvtpm_crq_get_next(), which is not visible here; confirm.
	 */
	crq->valid = 0;
	smp_wmb();
}

return IRQ_HANDLED;
}
/**
 * tpm_ibmvtpm_probe - ibm vtpm initialize entry point
 *
 * @vio_dev:	vio device struct
 * @id:		vio device id struct
 *
 * Return:
 *	0 on success.
 *	Non-zero on failure.
 */
/*
 * NOTE(review): "staticint" and "conststruct" below have lost their
 * separating whitespace (extraction artifact) and will not compile as-is;
 * they should read "static int" and "const struct". The function is also
 * truncated in this chunk — everything after the CRQ page allocation
 * (and the "cleanup" label it jumps to) is missing and must be restored
 * from the original file. Code tokens are otherwise left untouched.
 */
staticint tpm_ibmvtpm_probe(struct vio_dev *vio_dev, conststruct vio_device_id *id)
{
	struct ibmvtpm_dev *ibmvtpm;
	struct device *dev = &vio_dev->dev;
	struct ibmvtpm_crq_queue *crq_q;
	struct tpm_chip *chip;
	int rc = -ENOMEM, rc1;

	/* Allocate the chip first; tpmm_ prefix suggests it is
	 * devres-managed (freed with the device) — confirm against the
	 * TPM core.
	 */
	chip = tpmm_chip_alloc(dev, &tpm_ibmvtpm);
	if (IS_ERR(chip))
		return PTR_ERR(chip);

	ibmvtpm = kzalloc(sizeof(struct ibmvtpm_dev), GFP_KERNEL);
	if (!ibmvtpm) {
		dev_err(dev, "kzalloc for ibmvtpm failed\n");
		goto cleanup;
	}

	ibmvtpm->dev = dev;
	ibmvtpm->vdev = vio_dev;
	crq_q = &ibmvtpm->crq_queue;

	/* One zeroed page backs the CRQ ring. */
	crq_q->crq_addr = (struct ibmvtpm_crq *)get_zeroed_page(GFP_KERNEL);
	if (!crq_q->crq_addr) {
		dev_err(dev, "Unable to allocate memory for crq_addr\n");
		goto cleanup;
	}
/*
 * NOTE(review): extraction residue — the following German website
 * disclaimer was pasted into the source and is not part of the driver.
 * Translated: "The information on this website was compiled carefully
 * to the best of our knowledge. However, neither completeness, nor
 * correctness, nor quality of the information provided is guaranteed.
 * Note: the colored syntax display and the measurement are still
 * experimental." Remove once the original tail of tpm_ibmvtpm_probe()
 * is restored.
 */