/* Zcrypt related debug feature stuff. */
/* s390 debug feature handle for the zcrypt trace area (used by ZCRYPT_DBF_* macros). */
debug_info_t *zcrypt_dbf_info;
/* * Process a rescan of the transport layer. * Runs a synchronous AP bus rescan. * Returns true if something has changed (for example the * bus scan has found and build up new devices) and it is * worth to do a retry. Otherwise false is returned meaning * no changes on the AP bus level.
*/ staticinlinebool zcrypt_process_rescan(void)
{ return ap_bus_force_rescan();
}
/* * Find zcdn device by name. * Returns reference to the zcdn device which needs to be released * with put_device() after use.
*/ staticinlinestruct zcdn_device *find_zcdndev_by_name(constchar *name)
{ struct device *dev = class_find_device_by_name(&zcrypt_class, name);
return dev ? to_zcdn_dev(dev) : NULL;
}
/* * Find zcdn device by devt value. * Returns reference to the zcdn device which needs to be released * with put_device() after use.
*/ staticinlinestruct zcdn_device *find_zcdndev_by_devt(dev_t devt)
{ struct device *dev = class_find_device_by_devt(&zcrypt_class, devt);
return dev ? to_zcdn_dev(dev) : NULL;
}
static ssize_t ioctlmask_show(struct device *dev, struct device_attribute *attr, char *buf)
{ struct zcdn_device *zcdndev = to_zcdn_dev(dev); int i, n;
if (mutex_lock_interruptible(&ap_perms_mutex)) return -ERESTARTSYS;
n = sysfs_emit(buf, "0x"); for (i = 0; i < sizeof(zcdndev->perms.ioctlm) / sizeof(long); i++)
n += sysfs_emit_at(buf, n, "%016lx", zcdndev->perms.ioctlm[i]);
n += sysfs_emit_at(buf, n, "\n");
rc = ap_parse_mask_str(buf, zcdndev->perms.ioctlm,
AP_IOCTLS, &ap_perms_mutex); if (rc) return rc;
return count;
}
static DEVICE_ATTR_RW(ioctlmask);
static ssize_t apmask_show(struct device *dev, struct device_attribute *attr, char *buf)
{ struct zcdn_device *zcdndev = to_zcdn_dev(dev); int i, n;
if (mutex_lock_interruptible(&ap_perms_mutex)) return -ERESTARTSYS;
n = sysfs_emit(buf, "0x"); for (i = 0; i < sizeof(zcdndev->perms.apm) / sizeof(long); i++)
n += sysfs_emit_at(buf, n, "%016lx", zcdndev->perms.apm[i]);
n += sysfs_emit_at(buf, n, "\n");
rc = ap_parse_mask_str(buf, zcdndev->perms.apm,
AP_DEVICES, &ap_perms_mutex); if (rc) return rc;
return count;
}
static DEVICE_ATTR_RW(apmask);
static ssize_t aqmask_show(struct device *dev, struct device_attribute *attr, char *buf)
{ struct zcdn_device *zcdndev = to_zcdn_dev(dev); int i, n;
if (mutex_lock_interruptible(&ap_perms_mutex)) return -ERESTARTSYS;
n = sysfs_emit(buf, "0x"); for (i = 0; i < sizeof(zcdndev->perms.aqm) / sizeof(long); i++)
n += sysfs_emit_at(buf, n, "%016lx", zcdndev->perms.aqm[i]);
n += sysfs_emit_at(buf, n, "\n");
rc = ap_parse_mask_str(buf, zcdndev->perms.aqm,
AP_DOMAINS, &ap_perms_mutex); if (rc) return rc;
return count;
}
static DEVICE_ATTR_RW(aqmask);
static ssize_t admask_show(struct device *dev, struct device_attribute *attr, char *buf)
{ struct zcdn_device *zcdndev = to_zcdn_dev(dev); int i, n;
if (mutex_lock_interruptible(&ap_perms_mutex)) return -ERESTARTSYS;
n = sysfs_emit(buf, "0x"); for (i = 0; i < sizeof(zcdndev->perms.adm) / sizeof(long); i++)
n += sysfs_emit_at(buf, n, "%016lx", zcdndev->perms.adm[i]);
n += sysfs_emit_at(buf, n, "\n");
staticint zcdn_create(constchar *name)
{
dev_t devt; int i, rc = 0; struct zcdn_device *zcdndev;
if (mutex_lock_interruptible(&ap_perms_mutex)) return -ERESTARTSYS;
/* check if device node with this name already exists */ if (name[0]) {
zcdndev = find_zcdndev_by_name(name); if (zcdndev) {
put_device(&zcdndev->device);
rc = -EEXIST; goto unlockout;
}
}
/* find an unused minor number */ for (i = 0; i < ZCRYPT_MAX_MINOR_NODES; i++) {
devt = MKDEV(MAJOR(zcrypt_devt), MINOR(zcrypt_devt) + i);
zcdndev = find_zcdndev_by_devt(devt); if (zcdndev)
put_device(&zcdndev->device); else break;
} if (i == ZCRYPT_MAX_MINOR_NODES) {
rc = -ENOSPC; goto unlockout;
}
/* alloc and prepare a new zcdn device */
zcdndev = kzalloc(sizeof(*zcdndev), GFP_KERNEL); if (!zcdndev) {
rc = -ENOMEM; goto unlockout;
}
zcdndev->device.release = zcdn_device_release;
zcdndev->device.class = &zcrypt_class;
zcdndev->device.devt = devt;
zcdndev->device.groups = zcdn_dev_attr_groups; if (name[0])
rc = dev_set_name(&zcdndev->device, "%s", name); else
rc = dev_set_name(&zcdndev->device, ZCRYPT_NAME "_%d", (int)MINOR(devt)); if (rc) {
kfree(zcdndev); goto unlockout;
}
rc = device_register(&zcdndev->device); if (rc) {
put_device(&zcdndev->device); goto unlockout;
}
ZCRYPT_DBF_INFO("%s created zcdn device %d:%d\n",
__func__, MAJOR(devt), MINOR(devt));
if (mutex_lock_interruptible(&ap_perms_mutex)) return -ERESTARTSYS;
/* try to find this zcdn device */
zcdndev = find_zcdndev_by_name(name); if (!zcdndev) {
rc = -ENOENT; goto unlockout;
}
/* * The zcdn device is not hard destroyed. It is subject to * reference counting and thus just needs to be unregistered.
*/
put_device(&zcdndev->device);
device_unregister(&zcdndev->device);
/*
 * Unregister every zcdn device node of this driver.
 * Walks all possible minor numbers under ap_perms_mutex; nodes are
 * reference counted, so each one is just put (for the find reference)
 * and unregistered.
 */
static void zcdn_destroy_all(void)
{
	struct zcdn_device *zcdndev;
	dev_t devt;
	int minor;

	mutex_lock(&ap_perms_mutex);
	for (minor = 0; minor < ZCRYPT_MAX_MINOR_NODES; minor++) {
		devt = MKDEV(MAJOR(zcrypt_devt), MINOR(zcrypt_devt) + minor);
		zcdndev = find_zcdndev_by_devt(devt);
		if (!zcdndev)
			continue;
		/* drop the find reference, then unregister the device */
		put_device(&zcdndev->device);
		device_unregister(&zcdndev->device);
	}
	mutex_unlock(&ap_perms_mutex);
}
/*
 * zcrypt_read(): Not supported beyond zcrypt 1.3.1.
 *
 * Reading from a zcrypt device node is not supported; every
 * attempt fails with -EPERM.
 */
static ssize_t zcrypt_read(struct file *filp, char __user *buf,
			   size_t count, loff_t *f_pos)
{
	return -EPERM;
}
/* * zcrypt_write(): Not allowed. * * Write is not allowed
*/ static ssize_t zcrypt_write(struct file *filp, constchar __user *buf,
size_t count, loff_t *f_pos)
{ return -EPERM;
}
/* * zcrypt_open(): Count number of users. * * Device open function to count number of users.
*/ staticint zcrypt_open(struct inode *inode, struct file *filp)
{ struct ap_perms *perms = &ap_perms;
if (filp->f_inode->i_cdev == &zcrypt_cdev) { struct zcdn_device *zcdndev;
if (mutex_lock_interruptible(&ap_perms_mutex)) return -ERESTARTSYS;
zcdndev = find_zcdndev_by_devt(filp->f_inode->i_rdev); /* find returns a reference, no get_device() needed */
mutex_unlock(&ap_perms_mutex); if (zcdndev)
perms = &zcdndev->perms;
}
filp->private_data = (void *)perms;
/*
 * zcrypt_release(): Count number of users.
 *
 * Device close function to count number of users. If the file was
 * opened via a zcdn node, the node's device reference held since
 * open is dropped here.
 */
static int zcrypt_release(struct inode *inode, struct file *filp)
{
	if (filp->f_inode->i_cdev == &zcrypt_cdev) {
		struct zcdn_device *zcdndev;

		mutex_lock(&ap_perms_mutex);
		zcdndev = find_zcdndev_by_devt(filp->f_inode->i_rdev);
		mutex_unlock(&ap_perms_mutex);
		if (zcdndev) {
			/* 2 puts here: one for find, one for open */
			put_device(&zcdndev->device);
			put_device(&zcdndev->device);
		}
	}

	atomic_dec(&zcrypt_open_count);
	return 0;
}
staticinlineint zcrypt_check_ioctl(struct ap_perms *perms, unsignedint cmd)
{ int rc = -EPERM; int ioctlnr = (cmd & _IOC_NRMASK) >> _IOC_NRSHIFT;
if (ioctlnr > 0 && ioctlnr < AP_IOCTLS) { if (test_bit_inv(ioctlnr, perms->ioctlm))
rc = 0;
}
/*
 * NOTE(review): fragment of the RSA mod-expo dispatch path — the
 * enclosing function header, its local declarations and the code
 * before/after this span are missing from this extract, so the code
 * lines are left byte-identical. The loop below selects the preferred
 * card/queue pair (lowest weight incl. retry penalties) that is
 * online, configured, not checkstopped, size-eligible and admitted
 * by the caller's permission set.
 */
/* * As long as outputdatalength is big enough, we can set the * outputdatalength equal to the inputdatalength, since that is the * number of bytes we will copy in any case
*/
mex->outputdatalength = mex->inputdatalength;
rc = get_rsa_modex_fc(mex, &func_code); if (rc) goto out;
pref_zc = NULL;
pref_zq = NULL;
spin_lock(&zcrypt_list_lock);
for_each_zcrypt_card(zc) { /* Check for usable accelerator or CCA card */ if (!zc->online || !zc->card->config || zc->card->chkstop ||
!(zc->card->hwinfo.accel || zc->card->hwinfo.cca)) continue; /* Check for size limits */ if (zc->min_mod_size > mex->inputdatalength ||
zc->max_mod_size < mex->inputdatalength) continue; /* check if device node has admission for this card */ if (!zcrypt_check_card(perms, zc->card->id)) continue; /* get weight index of the card device */
wgt = zc->speed_rating[func_code]; /* penalty if this msg was previously sent via this card */
cpen = (tr && tr->again_counter && tr->last_qid &&
AP_QID_CARD(tr->last_qid) == zc->card->id) ?
TRACK_AGAIN_CARD_WEIGHT_PENALTY : 0; if (!zcrypt_card_compare(zc, pref_zc, wgt + cpen, pref_wgt)) continue;
for_each_zcrypt_queue(zq, zc) { /* check if device is usable and eligible */ if (!zq->online || !zq->ops->rsa_modexpo ||
!ap_queue_usable(zq->queue)) continue; /* check if device node has admission for this queue */ if (!zcrypt_check_queue(perms,
AP_QID_QUEUE(zq->queue->qid))) continue; /* penalty if the msg was previously sent at this qid */
qpen = (tr && tr->again_counter && tr->last_qid &&
tr->last_qid == zq->queue->qid) ?
TRACK_AGAIN_QUEUE_WEIGHT_PENALTY : 0; if (!zcrypt_queue_compare(zq, pref_zq,
wgt + cpen + qpen, pref_wgt)) continue;
pref_zc = zc;
pref_zq = zq;
pref_wgt = wgt + cpen + qpen;
}
}
pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, wgt);
spin_unlock(&zcrypt_list_lock);
if (!pref_zq) {
pr_debug("no matching queue found => ENODEV\n");
rc = -ENODEV; goto out;
}
/*
 * NOTE(review): fragment of the RSA CRT dispatch path — the enclosing
 * function header and surrounding code are missing from this extract,
 * so the code lines are left byte-identical. Same card/queue selection
 * scheme as the mod-expo path above, but requiring the rsa_modexpo_crt
 * queue op.
 */
/* * As long as outputdatalength is big enough, we can set the * outputdatalength equal to the inputdatalength, since that is the * number of bytes we will copy in any case
*/
crt->outputdatalength = crt->inputdatalength;
rc = get_rsa_crt_fc(crt, &func_code); if (rc) goto out;
pref_zc = NULL;
pref_zq = NULL;
spin_lock(&zcrypt_list_lock);
for_each_zcrypt_card(zc) { /* Check for usable accelerator or CCA card */ if (!zc->online || !zc->card->config || zc->card->chkstop ||
!(zc->card->hwinfo.accel || zc->card->hwinfo.cca)) continue; /* Check for size limits */ if (zc->min_mod_size > crt->inputdatalength ||
zc->max_mod_size < crt->inputdatalength) continue; /* check if device node has admission for this card */ if (!zcrypt_check_card(perms, zc->card->id)) continue; /* get weight index of the card device */
wgt = zc->speed_rating[func_code]; /* penalty if this msg was previously sent via this card */
cpen = (tr && tr->again_counter && tr->last_qid &&
AP_QID_CARD(tr->last_qid) == zc->card->id) ?
TRACK_AGAIN_CARD_WEIGHT_PENALTY : 0; if (!zcrypt_card_compare(zc, pref_zc, wgt + cpen, pref_wgt)) continue;
for_each_zcrypt_queue(zq, zc) { /* check if device is usable and eligible */ if (!zq->online || !zq->ops->rsa_modexpo_crt ||
!ap_queue_usable(zq->queue)) continue; /* check if device node has admission for this queue */ if (!zcrypt_check_queue(perms,
AP_QID_QUEUE(zq->queue->qid))) continue; /* penalty if the msg was previously sent at this qid */
qpen = (tr && tr->again_counter && tr->last_qid &&
tr->last_qid == zq->queue->qid) ?
TRACK_AGAIN_QUEUE_WEIGHT_PENALTY : 0; if (!zcrypt_queue_compare(zq, pref_zq,
wgt + cpen + qpen, pref_wgt)) continue;
pref_zc = zc;
pref_zq = zq;
pref_wgt = wgt + cpen + qpen;
}
}
pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, wgt);
spin_unlock(&zcrypt_list_lock);
if (!pref_zq) {
pr_debug("no matching queue found => ENODEV\n");
rc = -ENODEV; goto out;
}
/*
 * NOTE(review): fragment of the CPRB send dispatch path — the enclosing
 * function header and surrounding code are missing from this extract,
 * so the code lines are left byte-identical (including the mangled
 * `elseif` token, which presumably was `} else if` before extraction —
 * confirm against upstream). The code first validates admin/usage
 * admission for the target domain, autoselects the domain when only a
 * control domain was given, then picks the preferred CCA card/queue.
 */
tdom = *domain; if (perms != &ap_perms && tdom < AP_DOMAINS) { if (ap_msg.flags & AP_MSG_FLAG_ADMIN) { if (!test_bit_inv(tdom, perms->adm)) {
rc = -ENODEV; goto out;
}
} elseif ((ap_msg.flags & AP_MSG_FLAG_USAGE) == 0) {
rc = -EOPNOTSUPP; goto out;
}
} /* * If a valid target domain is set and this domain is NOT a usage * domain but a control only domain, autoselect target domain.
*/ if (tdom < AP_DOMAINS &&
!ap_test_config_usage_domain(tdom) &&
ap_test_config_ctrl_domain(tdom))
tdom = AUTOSEL_DOM;
pref_zc = NULL;
pref_zq = NULL;
spin_lock(&zcrypt_list_lock);
for_each_zcrypt_card(zc) { /* Check for usable CCA card */ if (!zc->online || !zc->card->config || zc->card->chkstop ||
!zc->card->hwinfo.cca) continue; /* Check for user selected CCA card */ if (xcrb->user_defined != AUTOSELECT &&
xcrb->user_defined != zc->card->id) continue; /* check if request size exceeds card max msg size */ if (ap_msg.len > zc->card->maxmsgsize) continue; /* check if device node has admission for this card */ if (!zcrypt_check_card(perms, zc->card->id)) continue; /* get weight index of the card device */
wgt = speed_idx_cca(func_code) * zc->speed_rating[SECKEY]; /* penalty if this msg was previously sent via this card */
cpen = (tr && tr->again_counter && tr->last_qid &&
AP_QID_CARD(tr->last_qid) == zc->card->id) ?
TRACK_AGAIN_CARD_WEIGHT_PENALTY : 0; if (!zcrypt_card_compare(zc, pref_zc, wgt + cpen, pref_wgt)) continue;
for_each_zcrypt_queue(zq, zc) { /* check for device usable and eligible */ if (!zq->online || !zq->ops->send_cprb ||
!ap_queue_usable(zq->queue) ||
(tdom != AUTOSEL_DOM &&
tdom != AP_QID_QUEUE(zq->queue->qid))) continue; /* check if device node has admission for this queue */ if (!zcrypt_check_queue(perms,
AP_QID_QUEUE(zq->queue->qid))) continue; /* penalty if the msg was previously sent at this qid */
qpen = (tr && tr->again_counter && tr->last_qid &&
tr->last_qid == zq->queue->qid) ?
TRACK_AGAIN_QUEUE_WEIGHT_PENALTY : 0; if (!zcrypt_queue_compare(zq, pref_zq,
wgt + cpen + qpen, pref_wgt)) continue;
pref_zc = zc;
pref_zq = zq;
pref_wgt = wgt + cpen + qpen;
}
}
pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, wgt);
spin_unlock(&zcrypt_list_lock);
if (!pref_zq) {
pr_debug("no match for address %02x.%04x => ENODEV\n",
xcrb->user_defined, *domain);
rc = -ENODEV; goto out;
}
/* in case of auto select, provide the correct domain */
qid = pref_zq->queue->qid; if (*domain == AUTOSEL_DOM)
*domain = AP_QID_QUEUE(qid);
/*
 * Send a CPRB request using the global ap_perms permission set.
 * Retries on -EAGAIN up to TRACK_AGAIN_MAX times; on -ENODEV a
 * synchronous AP bus rescan is triggered and, if it changed anything,
 * the whole retry loop runs once more. Exhausted -EAGAIN retries are
 * reported as -EIO.
 *
 * NOTE(review): the reviewed extract was cut after the pr_debug();
 * the missing `return rc;` and closing brace are restored.
 */
long zcrypt_send_cprb(struct ica_xcRB *xcrb, u32 xflags)
{
	struct zcrypt_track tr;
	int rc;

	memset(&tr, 0, sizeof(tr));

	do {
		rc = _zcrypt_send_cprb(xflags, &ap_perms, &tr, xcrb);
	} while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);

	/* on ENODEV failure: retry once again after a requested rescan */
	if (rc == -ENODEV && zcrypt_process_rescan())
		do {
			rc = _zcrypt_send_cprb(xflags, &ap_perms, &tr, xcrb);
		} while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);
	if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
		rc = -EIO;
	if (rc)
		pr_debug("rc=%d\n", rc);
	return rc;
}
/*
 * Send an EP11 CPRB request using the global ap_perms permission set.
 * Same retry/rescan scheme as zcrypt_send_cprb(): -EAGAIN is retried
 * up to TRACK_AGAIN_MAX times, -ENODEV triggers one rescan-and-retry
 * round, exhausted retries become -EIO.
 *
 * NOTE(review): the reviewed extract was cut after the pr_debug();
 * the missing `return rc;` and closing brace are restored.
 */
long zcrypt_send_ep11_cprb(struct ep11_urb *xcrb, u32 xflags)
{
	struct zcrypt_track tr;
	int rc;

	memset(&tr, 0, sizeof(tr));

	do {
		rc = _zcrypt_send_ep11_cprb(xflags, &ap_perms, &tr, xcrb);
	} while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);

	/* on ENODEV failure: retry once again after a requested rescan */
	if (rc == -ENODEV && zcrypt_process_rescan())
		do {
			rc = _zcrypt_send_ep11_cprb(xflags, &ap_perms, &tr, xcrb);
		} while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);
	if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
		rc = -EIO;
	if (rc)
		pr_debug("rc=%d\n", rc);
	return rc;
}
/*
 * hwrng read callback: hand out one u32 of random data from the
 * driver's buffer, refilling it via zcrypt_rng() when empty (with a
 * single rescan-and-retry on -ENODEV). Returns sizeof(u32) on
 * success or -EIO on refill failure.
 */
static int zcrypt_rng_data_read(struct hwrng *rng, u32 *data)
{
	int rc;

	/*
	 * We don't need locking here because the RNG API guarantees serialized
	 * read method calls.
	 */
	if (zcrypt_rng_buffer_index == 0) {
		rc = zcrypt_rng((char *)zcrypt_rng_buffer);
		/* on ENODEV failure: retry once again after an AP bus rescan */
		if (rc == -ENODEV && zcrypt_process_rescan())
			rc = zcrypt_rng((char *)zcrypt_rng_buffer);
		if (rc < 0)
			return -EIO;
		zcrypt_rng_buffer_index = rc / sizeof(*data);
	}

	*data = zcrypt_rng_buffer[--zcrypt_rng_buffer_index];
	return sizeof(*data);
}
/*
 * Wait until the zcrypt api is operational.
 * The AP bus scan and the binding of ap devices to device drivers is
 * an asynchronous job. This function waits until these initial jobs
 * are done and so the zcrypt api should be ready to serve crypto
 * requests - if there are resources available. The function uses an
 * internal timeout of 30s. The very first caller will either wait for
 * ap bus bindings complete or the timeout happens. This state will be
 * remembered for further callers which will only be blocked until a
 * decision is made (timeout or bindings complete).
 * On timeout -ETIME is returned, on success the return value is 0.
 *
 * NOTE(review): the reviewed extract was cut after the switch; the
 * missing mutex_unlock()/return/closing brace are restored — without
 * them zcrypt_wait_api_lock would be held forever.
 */
int zcrypt_wait_api_operational(void)
{
	static DEFINE_MUTEX(zcrypt_wait_api_lock);
	static int zcrypt_wait_api_state;
	int rc;

	rc = mutex_lock_interruptible(&zcrypt_wait_api_lock);
	if (rc)
		return rc;

	switch (zcrypt_wait_api_state) {
	case 0:
		/* initial state, invoke wait for the ap bus complete */
		rc = ap_wait_apqn_bindings_complete(
			msecs_to_jiffies(ZCRYPT_WAIT_BINDINGS_COMPLETE_MS));
		switch (rc) {
		case 0:
			/* ap bus bindings are complete */
			zcrypt_wait_api_state = 1;
			break;
		case -EINTR:
			/* interrupted, go back to caller */
			break;
		case -ETIME:
			/* timeout */
			ZCRYPT_DBF_WARN("%s ap_wait_init_apqn_bindings_complete()=ETIME\n",
					__func__);
			zcrypt_wait_api_state = -ETIME;
			break;
		default:
			/* other failure */
			pr_debug("ap_wait_init_apqn_bindings_complete()=%d\n", rc);
			break;
		}
		break;
	case 1:
		/* a previous caller already found ap bus bindings complete */
		rc = 0;
		break;
	default:
		/* a previous caller had timeout or other failure */
		rc = zcrypt_wait_api_state;
		break;
	}

	mutex_unlock(&zcrypt_wait_api_lock);

	return rc;
}
/*
 * NOTE(review): fragment of the module init code — the enclosing
 * function and the error-unwind labels referenced by the gotos are
 * missing from this extract; code lines left byte-identical. Creates
 * the class-level "zcdn_create" / "zcdn_destroy" sysfs attributes.
 */
/* need some class specific sysfs attributes */
rc = class_create_file(&zcrypt_class, &class_attr_zcdn_create); if (rc) goto out_class_create_file_1_failed;
rc = class_create_file(&zcrypt_class, &class_attr_zcdn_destroy); if (rc) goto out_class_create_file_2_failed;
/*
 * NOTE(review): the following German website-footer boilerplate is not
 * part of this driver; it leaked in during text extraction and, as bare
 * text, breaks compilation. Commented out pending removal upstream.
 * Original text: "Die Informationen auf dieser Webseite wurden nach
 * bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder
 * Vollständigkeit, noch Richtigkeit, noch Qualität der bereitgestellten
 * Informationen zugesichert. Bemerkung: Die farbliche Syntaxdarstellung
 * und die Messung sind noch experimentell."
 */