// SPDX-License-Identifier: GPL-2.0-or-later /* * Adaptec AAC series RAID controller driver * (c) Copyright 2001 Red Hat Inc. * * based on the old aacraid driver that is.. * Adaptec aacraid device driver for Linux. * * Copyright (c) 2000-2010 Adaptec, Inc. * 2010-2015 PMC-Sierra, Inc. (aacraid@pmc-sierra.com) * 2016-2017 Microsemi Corp. (aacraid@microsemi.com) * * Module Name: * commctrl.c * * Abstract: Contains all routines for control of the AFA comm layer
*/
/*
 * NOTE(review): interior fragment of ioctl_send_fib() -- the function
 * signature and the declarations of 'kfib', 'fibptr', 'size', 'osize'
 * and 'retval' are not visible in this chunk; verify against the
 * original source before editing.
 */
kfib = fibptr->hw_fib_va; /* * First copy in the header so that we can check the size field.
*/ if (copy_from_user((void *)kfib, arg, sizeof(struct aac_fibhdr))) {
aac_fib_free(fibptr); return -EFAULT;
} /* * Since we copy based on the fib header size, make sure that we * will not overrun the buffer when we copy the memory. Return * an error if we would.
*/
osize = size = le16_to_cpu(kfib->header.Size) + sizeof(struct aac_fibhdr); if (size < le16_to_cpu(kfib->header.SenderSize))
size = le16_to_cpu(kfib->header.SenderSize); if (size > dev->max_fib_size) {
dma_addr_t daddr;
/*
 * NOTE(review): extraction seam -- the oversized-fib handling that
 * allocates a larger DMA buffer and re-copies the fib appears to be
 * missing between this point and the sanity check below.
 */
/* Sanity check the second copy */ if ((osize != le16_to_cpu(kfib->header.Size) + sizeof(struct aac_fibhdr))
|| (size < le16_to_cpu(kfib->header.SenderSize))) {
retval = -EINVAL; goto cleanup;
}
if (kfib->header.Command == cpu_to_le16(TakeABreakPt)) {
aac_adapter_interrupt(dev); /* * Since we didn't really send a fib, zero out the state to allow * cleanup code not to assert.
*/
kfib->header.XferState = 0;
} else {
retval = aac_fib_send(le16_to_cpu(kfib->header.Command), fibptr,
le16_to_cpu(kfib->header.Size) , FsaNormal,
1, 1, NULL, NULL); if (retval) { goto cleanup;
} if (aac_fib_complete(fibptr) != 0) {
retval = -EINVAL; goto cleanup;
}
} /* * Make sure that the size returned by the adapter (which includes * the header) is less than or equal to the size of a fib, so we * don't corrupt application data. Then copy that size to the user * buffer. (Don't try to add the header information again, since it * was already included by the adapter.)
*/
/**
 *	open_getadapter_fib	-	Get the next fib
 *	@dev: adapter being processed
 *	@arg: arguments to the open call
 *
 *	Allocates a new AdapterFibContext, gives it a (locked-loop) unique
 *	32-bit identifier, registers it on @dev's fib_list, and copies the
 *	identifier back to user space so later GET_NEXT_ADAPTER_FIB calls
 *	can reference it.
 *
 *	Returns 0 on success, -ENOMEM if the context cannot be allocated,
 *	or -EFAULT if the identifier cannot be copied to user space.
 */
static int open_getadapter_fib(struct aac_dev * dev, void __user *arg)
{
	struct aac_fib_context * fibctx;
	int status;

	fibctx = kmalloc(sizeof(struct aac_fib_context), GFP_KERNEL);
	if (fibctx == NULL) {
		status = -ENOMEM;
	} else {
		unsigned long flags;
		struct list_head * entry;
		struct aac_fib_context * context;

		fibctx->type = FSAFS_NTC_GET_ADAPTER_FIB_CONTEXT;
		fibctx->size = sizeof(struct aac_fib_context);
		/*
		 *	Yes yes, I know this could be an index, but we have a
		 *	better guarantee of uniqueness for the locked loop below.
		 *	Without the aid of a persistent history, this also helps
		 *	reduce the chance that the opaque context would be reused.
		 */
		fibctx->unique = (u32)((ulong)fibctx & 0xFFFFFFFF);
		/*
		 *	Initialize the mutex used to wait for the next AIF.
		 */
		init_completion(&fibctx->completion);
		fibctx->wait = 0;
		/*
		 *	Initialize the fibs and set the count of fibs on
		 *	the list to 0.
		 */
		fibctx->count = 0;
		INIT_LIST_HEAD(&fibctx->fib_list);
		fibctx->jiffies = jiffies/HZ;
		/*
		 *	Now add this context onto the adapter's
		 *	AdapterFibContext list.
		 */
		spin_lock_irqsave(&dev->fib_lock, flags);
		/* Ensure that we have a unique identifier */
		entry = dev->fib_list.next;
		while (entry != &dev->fib_list) {
			context = list_entry(entry, struct aac_fib_context, next);
			if (context->unique == fibctx->unique) {
				/* Not unique (32 bits) -- bump and rescan */
				fibctx->unique++;
				entry = dev->fib_list.next;
			} else {
				entry = entry->next;
			}
		}
		list_add_tail(&fibctx->next, &dev->fib_list);
		spin_unlock_irqrestore(&dev->fib_lock, flags);
		if (copy_to_user(arg, &fibctx->unique,
						sizeof(fibctx->unique))) {
			status = -EFAULT;
		} else {
			status = 0;
		}
	}
	return status;
}
/**
 *	next_getadapter_fib	-	get the next fib
 *	@dev: adapter to use
 *	@arg: ioctl argument (struct fib_ioctl, possibly the compat layout)
 *
 *	This routine will get the next Fib, if available, from the
 *	AdapterFibContext passed in from the user.  If none is queued and
 *	f.wait is set, it blocks (interruptibly) until one arrives; if the
 *	AIF thread has died it is restarted first.
 *
 *	Returns 0 on success, -EFAULT on copy failures, -EINVAL for a bad
 *	context handle, -EAGAIN when no fib is pending and the caller did
 *	not ask to wait, or -ERESTARTSYS if the wait was interrupted.
 */
static int next_getadapter_fib(struct aac_dev * dev, void __user *arg)
{
	struct fib_ioctl f;
	struct fib *fib;
	struct aac_fib_context *fibctx;
	int status;
	struct list_head * entry;
	unsigned long flags;

	if (in_compat_syscall()) {
		struct compat_fib_ioctl cf;

		if (copy_from_user(&cf, arg, sizeof(struct compat_fib_ioctl)))
			return -EFAULT;

		f.fibctx = cf.fibctx;
		f.wait = cf.wait;
		f.fib = compat_ptr(cf.fib);
	} else {
		if (copy_from_user(&f, arg, sizeof(struct fib_ioctl)))
			return -EFAULT;
	}
	/*
	 *	Verify that the HANDLE passed in was a valid AdapterFibContext
	 *
	 *	Search the list of AdapterFibContext addresses on the adapter
	 *	to be sure this is a valid address
	 */
	spin_lock_irqsave(&dev->fib_lock, flags);
	entry = dev->fib_list.next;
	fibctx = NULL;

	while (entry != &dev->fib_list) {
		fibctx = list_entry(entry, struct aac_fib_context, next);
		/*
		 *	Extract the AdapterFibContext from the Input parameters.
		 */
		if (fibctx->unique == f.fibctx) { /* We found a winner */
			break;
		}
		entry = entry->next;
		fibctx = NULL;
	}
	if (!fibctx) {
		spin_unlock_irqrestore(&dev->fib_lock, flags);
		dprintk ((KERN_INFO "Fib Context not found\n"));
		return -EINVAL;
	}

	if((fibctx->type != FSAFS_NTC_GET_ADAPTER_FIB_CONTEXT) ||
		 (fibctx->size != sizeof(struct aac_fib_context))) {
		spin_unlock_irqrestore(&dev->fib_lock, flags);
		dprintk ((KERN_INFO "Fib Context corrupt?\n"));
		return -EINVAL;
	}
	status = 0;
	/*
	 *	If there are no fibs to send back, then either wait or return
	 *	-EAGAIN
	 */
return_fib:
	if (!list_empty(&fibctx->fib_list)) {
		/*
		 *	Pull the next fib from the fibs
		 */
		entry = fibctx->fib_list.next;
		list_del(entry);

		fib = list_entry(entry, struct fib, fiblink);
		fibctx->count--;
		spin_unlock_irqrestore(&dev->fib_lock, flags);
		if (copy_to_user(f.fib, fib->hw_fib_va, sizeof(struct hw_fib))) {
			kfree(fib->hw_fib_va);
			kfree(fib);
			return -EFAULT;
		}
		/*
		 *	Free the space occupied by this copy of the fib.
		 */
		kfree(fib->hw_fib_va);
		kfree(fib);
		status = 0;
	} else {
		spin_unlock_irqrestore(&dev->fib_lock, flags);
		/* If someone killed the AIF aacraid thread, restart it */
		status = !dev->aif_thread;
		if (status && !dev->in_reset && dev->queues && dev->fsa_dev) {
			/* Be paranoid, be very paranoid! */
			kthread_stop(dev->thread);
			ssleep(1);
			dev->aif_thread = 0;
			dev->thread = kthread_run(aac_command_thread, dev,
						  "%s", dev->name);
			ssleep(1);
		}
		if (f.wait) {
			if (wait_for_completion_interruptible(&fibctx->completion) < 0) {
				status = -ERESTARTSYS;
			} else {
				/* Lock again and retry until a fib is queued */
				spin_lock_irqsave(&dev->fib_lock, flags);
				goto return_fib;
			}
		} else {
			status = -EAGAIN;
		}
	}
	fibctx->jiffies = jiffies/HZ;
	return status;
}
/*
 * NOTE(review): fragment of the fib-context teardown path (presumably
 * aac_close_fib_context()) -- the enclosing function signature and the
 * declarations of 'fib' and any lock flags are not visible in this
 * chunk.  Presumably dev->fib_lock is held by the caller while the
 * list is drained -- TODO confirm against the original source.
 */
/* * First free any FIBs that have not been consumed.
*/ while (!list_empty(&fibctx->fib_list)) { struct list_head * entry; /* * Pull the next fib from the fibs
*/
entry = fibctx->fib_list.next;
list_del(entry);
fib = list_entry(entry, struct fib, fiblink);
fibctx->count--; /* * Free the space occupied by this copy of the fib.
*/
kfree(fib->hw_fib_va);
kfree(fib);
} /* * Remove the Context from the AdapterFibContext List
*/
list_del(&fibctx->next); /* * Invalidate context
*/
fibctx->type = 0; /* * Free the space occupied by the Context
*/
kfree(fibctx); return 0;
}
/*
 * NOTE(review): the body below is a fragment -- the
 * close_getadapter_fib() signature, its local declarations, the
 * fib_lock acquisition, and the tail that drops the lock and tears
 * down the found context were lost in extraction; verify against the
 * original source.
 */
/** * close_getadapter_fib - close down user fib context * @dev: adapter * @arg: ioctl arguments * * This routine will close down the fibctx passed in from the user.
*/
/* * Verify that the HANDLE passed in was a valid AdapterFibContext * * Search the list of AdapterFibContext addresses on the adapter * to be sure this is a valid address
*/
entry = dev->fib_list.next;
fibctx = NULL;
while(entry != &dev->fib_list) {
fibctx = list_entry(entry, struct aac_fib_context, next); /* * Extract the fibctx from the input parameters
*/ if (fibctx->unique == (u32)(uintptr_t)arg) /* We found a winner */ break;
entry = entry->next;
fibctx = NULL;
}
/** * check_revision - return the driver version * @dev: adapter * @arg: ioctl arguments * * This routine returns the driver version. * Under Linux, there have been no version incompatibilities, so this is * simple!
*/
/*
 * NOTE(review): fragment of aac_send_raw_srb() -- the function
 * signature and the declarations of 'srbfib', 'sg_list', 'fibsize',
 * 'user_srb' and 'rcode' are not visible in this chunk.
 */
if (dev->in_reset) {
dprintk((KERN_DEBUG"aacraid: send raw srb -EBUSY\n")); return -EBUSY;
} if (!capable(CAP_SYS_ADMIN)){
dprintk((KERN_DEBUG"aacraid: No permission to send raw srb\n")); return -EPERM;
} /* * Allocate and initialize a Fib then setup a SRB command
*/ if (!(srbfib = aac_fib_alloc(dev))) { return -ENOMEM;
}
/* Zero sg_list up front so the cleanup path can safely kfree entries */
memset(sg_list, 0, sizeof(sg_list)); /* cleanup may take issue */ if(copy_from_user(&fibsize, &user_srb->count,sizeof(u32))){
dprintk((KERN_DEBUG"aacraid: Could not copy data size from user\n"));
rcode = -EFAULT; goto cleanup;
}
/*
 * NOTE(review): large spliced fragment of aac_send_raw_srb()'s
 * scatter/gather setup, send, copy-back and cleanup.  Several interior
 * spans (loop closings, the aac_hba_send/aac_fib_send call that sets
 * 'status', and some brace structure) were lost in extraction; the
 * seams are marked below.  Do not edit without the original source.
 */
/* iu_type is a parameter of aac_hba_send */ switch (data_dir) { case DMA_TO_DEVICE:
hbacmd->byte1 = 2; break; case DMA_FROM_DEVICE: case DMA_BIDIRECTIONAL:
hbacmd->byte1 = 1; break; case DMA_NONE: default: break;
}
hbacmd->lun[1] = cpu_to_le32(user_srbcmd->lun);
hbacmd->it_nexus = dev->hba_map[chn][user_srbcmd->id].rmw_nexus;
/* * we fill in reply_qid later in aac_src_deliver_message * we fill in iu_type, request_id later in aac_hba_send * we fill in emb_data_desc_count, data_length later * in sg list build
*/
/* * This should also catch if user used the 32 bit sgmap
*/ if (actual_fibsize64 == fibsize) {
actual_fibsize = actual_fibsize64; for (i = 0; i < upsg->count; i++) {
u64 addr; void* p;
p = kmalloc(sg_count[i], GFP_KERNEL); if(!p) {
dprintk((KERN_DEBUG"aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
sg_count[i], i, upsg->count));
rcode = -ENOMEM; goto cleanup;
}
/* 64-bit user sg entry: reassemble the address from its two halves */
addr = (u64)upsg->sg[i].addr[0];
addr += ((u64)upsg->sg[i].addr[1]) << 32;
sg_user[i] = (void __user *)(uintptr_t)addr;
sg_list[i] = p; // save so we can clean up later
sg_indx = i;
if (flags & SRB_DataOut) { if (copy_from_user(p, sg_user[i],
sg_count[i])){
dprintk((KERN_DEBUG"aacraid: Could not copy sg data from user\n"));
rcode = -EFAULT; goto cleanup;
}
}
addr = dma_map_single(&dev->pdev->dev, p,
sg_count[i], data_dir);
/*
 * NOTE(review): extraction seam -- code between the previous
 * dma_map_single() and this second allocation loop is missing.
 */
p = kmalloc(sg_count[i], GFP_KERNEL); if(!p) {
dprintk((KERN_DEBUG "aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
sg_count[i], i, usg->count));
kfree(usg);
rcode = -ENOMEM; goto cleanup;
}
sg_user[i] = (void __user *)(uintptr_t)usg->sg[i].addr;
sg_list[i] = p; // save so we can clean up later
sg_indx = i;
if (flags & SRB_DataOut) { if (copy_from_user(p, sg_user[i],
sg_count[i])) {
kfree (usg);
dprintk((KERN_DEBUG"aacraid: Could not copy sg data from user\n"));
rcode = -EFAULT; goto cleanup;
}
}
addr = dma_map_single(&dev->pdev->dev, p,
sg_count[i], data_dir);
/*
 * NOTE(review): extraction seam -- enclosing braces closing the loop
 * above appear to be missing before this branch.
 */
if (actual_fibsize64 == fibsize) { struct user_sgmap64* usg = (struct user_sgmap64 *)upsg; for (i = 0; i < upsg->count; i++) {
uintptr_t addr; void* p;
sg_count[i] = usg->sg[i].count; if (sg_count[i] >
((dev->adapter_info.options &
AAC_OPT_NEW_COMM) ?
(dev->scsi_host_ptr->max_sectors << 9) :
65536)) {
rcode = -EINVAL; goto cleanup;
}
p = kmalloc(sg_count[i], GFP_KERNEL); if (!p) {
dprintk((KERN_DEBUG"aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
sg_count[i], i, usg->count));
rcode = -ENOMEM; goto cleanup;
}
addr = (u64)usg->sg[i].addr[0];
addr += ((u64)usg->sg[i].addr[1]) << 32;
sg_user[i] = (void __user *)addr;
sg_list[i] = p; // save so we can clean up later
sg_indx = i;
if (flags & SRB_DataOut) { if (copy_from_user(p, sg_user[i],
sg_count[i])){
dprintk((KERN_DEBUG"aacraid: Could not copy sg data from user\n"));
rcode = -EFAULT; goto cleanup;
}
}
addr = dma_map_single(&dev->pdev->dev, p,
usg->sg[i].count,
data_dir);
psg->sg[i].addr = cpu_to_le32(addr & 0xffffffff);
byte_count += usg->sg[i].count;
psg->sg[i].count = cpu_to_le32(sg_count[i]);
}
} else { for (i = 0; i < upsg->count; i++) {
dma_addr_t addr; void* p;
sg_count[i] = upsg->sg[i].count; if (sg_count[i] >
((dev->adapter_info.options &
AAC_OPT_NEW_COMM) ?
(dev->scsi_host_ptr->max_sectors << 9) :
65536)) {
rcode = -EINVAL; goto cleanup;
}
p = kmalloc(sg_count[i], GFP_KERNEL); if (!p) {
dprintk((KERN_DEBUG"aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
sg_count[i], i, upsg->count));
rcode = -ENOMEM; goto cleanup;
}
sg_user[i] = (void __user *)(uintptr_t)upsg->sg[i].addr;
sg_list[i] = p; // save so we can clean up later
sg_indx = i;
if (flags & SRB_DataOut) { if (copy_from_user(p, sg_user[i],
sg_count[i])) {
dprintk((KERN_DEBUG"aacraid: Could not copy sg data from user\n"));
rcode = -EFAULT; goto cleanup;
}
}
addr = dma_map_single(&dev->pdev->dev, p,
sg_count[i], data_dir);
/*
 * NOTE(review): extraction seam -- the call that actually sends the
 * fib (which sets 'status') is missing before this check.
 */
if (status != 0) {
dprintk((KERN_DEBUG"aacraid: Could not send raw srb fib to hba\n"));
rcode = -ENXIO; goto cleanup;
}
if (flags & SRB_DataIn) { for(i = 0 ; i <= sg_indx; i++){ if (copy_to_user(sg_user[i], sg_list[i], sg_count[i])) {
dprintk((KERN_DEBUG"aacraid: Could not copy sg data to user\n"));
rcode = -EFAULT; goto cleanup;
/*
 * NOTE(review): extraction seam -- closing braces of the DataIn
 * copy-back loop are missing before the reply copy below.
 */
reply = (struct aac_srb_reply *) fib_data(srbfib); if (copy_to_user(user_reply, reply, sizeof(struct aac_srb_reply))) {
dprintk((KERN_DEBUG"aacraid: Copy to user failed\n"));
rcode = -EFAULT; goto cleanup;
}
}
cleanup:
kfree(user_srbcmd); if (rcode != -ERESTARTSYS) { for (i = 0; i <= sg_indx; i++)
kfree(sg_list[i]);
aac_fib_complete(srbfib);
aac_fib_free(srbfib);
}
/**
 *	aac_do_ioctl	-	dispatch a commctrl ioctl
 *	@dev: adapter the ioctl targets
 *	@cmd: FSACTL_* ioctl command code
 *	@arg: user-space argument pointer
 *
 *	Serializes against other ioctls via dev->ioctl_mutex, rejects
 *	requests once the adapter is shut down, gives the HBA-specific
 *	handler first crack, and otherwise dispatches on @cmd.
 *
 *	Returns the handler's status, -EACCES after shutdown, or -ENOTTY
 *	for an unrecognized command.
 */
int aac_do_ioctl(struct aac_dev *dev, unsigned int cmd, void __user *arg)
{
	int status;

	mutex_lock(&dev->ioctl_mutex);

	if (dev->adapter_shutdown) {
		status = -EACCES;
		goto cleanup;
	}

	/*
	 *	HBA gets first crack
	 */
	status = aac_dev_ioctl(dev, cmd, arg);
	if (status != -ENOTTY)
		goto cleanup;

	switch (cmd) {
	case FSACTL_MINIPORT_REV_CHECK:
		status = check_revision(dev, arg);
		break;
	case FSACTL_SEND_LARGE_FIB:
	case FSACTL_SENDFIB:
		status = ioctl_send_fib(dev, arg);
		break;
	case FSACTL_OPEN_GET_ADAPTER_FIB:
		status = open_getadapter_fib(dev, arg);
		break;
	case FSACTL_GET_NEXT_ADAPTER_FIB:
		status = next_getadapter_fib(dev, arg);
		break;
	case FSACTL_CLOSE_GET_ADAPTER_FIB:
		status = close_getadapter_fib(dev, arg);
		break;
	case FSACTL_SEND_RAW_SRB:
		status = aac_send_raw_srb(dev, arg);
		break;
	case FSACTL_GET_PCI_INFO:
		status = aac_get_pci_info(dev, arg);
		break;
	case FSACTL_GET_HBA_INFO:
		status = aac_get_hba_info(dev, arg);
		break;
	case FSACTL_RESET_IOP:
		status = aac_send_reset_adapter(dev, arg);
		break;

	default:
		status = -ENOTTY;
		break;
	}

cleanup:
	mutex_unlock(&dev->ioctl_mutex);

	return status;
}
Messung V0.5
¤ Dauer der Verarbeitung: 0.13 Sekunden
(vorverarbeitet)
¤
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.