// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Driver for the Micron P320 SSD
 *   Copyright (C) 2011 Micron Technology, Inc.
 *
 * Portions of this code were derived from works subjected to the
 * following copyright:
 *    Copyright (C) 2009 Integrated Device Technology, Inc.
 */
/* * This function check_for_surprise_removal is called * while card is removed from the system and it will * read the vendor id from the configuration space * * @pdev Pointer to the pci_dev structure. * * return value * true if device removed, else false
*/ staticbool mtip_check_surprise_removal(struct driver_data *dd)
{
u16 vendor_id = 0;
if (dd->sr) returntrue;
/* Read the vendorID from the configuration space */
pci_read_config_word(dd->pdev, 0x00, &vendor_id); if (vendor_id == 0xFFFF) {
dd->sr = true; if (dd->disk)
blk_mark_disk_dead(dd->disk); returntrue; /* device removed */
}
/*
 * Reset the HBA (without sleeping)
 *
 * @dd Pointer to the driver data structure.
 *
 * return value
 *	0	The reset was successful.
 *	-1	The HBA Reset bit did not clear.
 */
static int mtip_hba_reset(struct driver_data *dd)
{
	unsigned long timeout;

	/* Set the reset bit */
	writel(HOST_RESET, dd->mmio + HOST_CTL);

	/* Flush */
	readl(dd->mmio + HOST_CTL);

	/*
	 * Spin for up to 10 seconds waiting for reset acknowledgement. Spec
	 * is 1 sec but in LUN failure conditions, up to 10 secs are required
	 */
	timeout = jiffies + msecs_to_jiffies(10000);
	do {
		mdelay(10);
		/* Give up early if the device is being removed. */
		if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag))
			return -1;

	} while ((readl(dd->mmio + HOST_CTL) & HOST_RESET)
		 && time_before(jiffies, timeout));

	/* Hardware never acknowledged the reset. */
	if (readl(dd->mmio + HOST_CTL) & HOST_RESET)
		return -1;

	return 0;
}
/*
 * Issue a command to the hardware.
 *
 * Set the appropriate bit in the s_active and Command Issue hardware
 * registers, causing hardware command processing to begin.
 *
 * @port Pointer to the port structure.
 * @tag  The tag of the command to be issued.
 *
 * return value
 *	None
 */
static inline void mtip_issue_ncq_command(struct mtip_port *port, int tag)
{
	int group = tag >> 5;	/* 32 tags per slot group */

	/* guard SACT and CI registers */
	spin_lock(&port->cmd_issue_lock[group]);
	writel((1 << MTIP_TAG_BIT(tag)),
			port->s_active[MTIP_TAG_INDEX(tag)]);
	writel((1 << MTIP_TAG_BIT(tag)),
			port->cmd_issue[MTIP_TAG_INDEX(tag)]);
	spin_unlock(&port->cmd_issue_lock[group]);
}
/* * Enable/disable the reception of FIS * * @port Pointer to the port data structure * @enable 1 to enable, 0 to disable * * return value * Previous state: 1 enabled, 0 disabled
*/ staticint mtip_enable_fis(struct mtip_port *port, int enable)
{
u32 tmp;
/* * Enable/disable the DMA engine * * @port Pointer to the port data structure * @enable 1 to enable, 0 to disable * * return value * Previous state: 1 enabled, 0 disabled.
*/ staticint mtip_enable_engine(struct mtip_port *port, int enable)
{
u32 tmp;
/*
 * Enables the port DMA engine and FIS reception.
 *
 * @port Pointer to the port data structure
 *
 * return value
 *	None
 */
static inline void mtip_start_port(struct mtip_port *port)
{
	/* Enable FIS reception */
	mtip_enable_fis(port, 1);

	/* Enable the DMA engine */
	mtip_enable_engine(port, 1);
}
/*
 * Deinitialize a port by disabling port interrupts, the DMA engine,
 * and FIS reception.
 *
 * @port Pointer to the port structure
 *
 * return value
 *	None
 */
static inline void mtip_deinit_port(struct mtip_port *port)
{
	/* Disable interrupts on this port */
	writel(0, port->mmio + PORT_IRQ_MASK);

	/* Disable the DMA engine */
	mtip_enable_engine(port, 0);

	/*
	 * Disable FIS reception.
	 * NOTE(review): the mangled source was truncated here; this call is
	 * restored per the function's stated contract above — confirm against
	 * driver history.
	 */
	mtip_enable_fis(port, 0);
}
/*
 * Initialize a port.
 *
 * This function deinitializes the port by calling mtip_deinit_port() and
 * then initializes it by setting the command header and RX FIS addresses,
 * clearing the SError register and any pending port interrupts before
 * re-enabling the default set of port interrupts.
 *
 * @port Pointer to the port structure.
 *
 * return value
 *	None
 */
static void mtip_init_port(struct mtip_port *port)
{
	int i;

	mtip_deinit_port(port);

	/* Program the command list base and FIS base addresses */
	if (readl(port->dd->mmio + HOST_CAP) & HOST_CAP_64) {
		/* Upper 32 bits, written as two 16-bit shifts to avoid a
		 * 64-bit shift warning on 32-bit builds. */
		writel((port->command_list_dma >> 16) >> 16,
			 port->mmio + PORT_LST_ADDR_HI);
		writel((port->rxfis_dma >> 16) >> 16,
			 port->mmio + PORT_FIS_ADDR_HI);
		set_bit(MTIP_PF_HOST_CAP_64, &port->flags);
	}

	/* reset the completed registers.*/
	for (i = 0; i < port->dd->slot_groups; i++)
		writel(0xFFFFFFFF, port->completed[i]);

	/* Clear any pending interrupts for this port */
	writel(readl(port->mmio + PORT_IRQ_STAT), port->mmio + PORT_IRQ_STAT);

	/* Clear any pending interrupts on the HBA. */
	writel(readl(port->dd->mmio + HOST_IRQ_STAT),
					port->dd->mmio + HOST_IRQ_STAT);

	/* Enable port interrupts */
	writel(DEF_PORT_IRQ, port->mmio + PORT_IRQ_MASK);
}
/* * Restart a port * * @port Pointer to the port data structure. * * return value * None
*/ staticvoid mtip_restart_port(struct mtip_port *port)
{ unsignedlong timeout;
/* Disable the DMA engine */
mtip_enable_engine(port, 0);
/* Chip quirk: wait up to 500ms for PxCMD.CR == 0 */
timeout = jiffies + msecs_to_jiffies(500); while ((readl(port->mmio + PORT_CMD) & PORT_CMD_LIST_ON)
&& time_before(jiffies, timeout))
;
if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &port->dd->dd_flag)) return;
/* * Chip quirk: escalate to hba reset if * PxCMD.CR not clear after 500 ms
*/ if (readl(port->mmio + PORT_CMD) & PORT_CMD_LIST_ON) {
dev_warn(&port->dd->pdev->dev, "PxCMD.CR not clear, escalating reset\n");
if (mtip_hba_reset(port->dd))
dev_err(&port->dd->pdev->dev, "HBA reset escalation failed.\n");
/* 30 ms delay before com reset to quiesce chip */
mdelay(30);
}
dev_warn(&port->dd->pdev->dev, "Issuing COM reset\n");
/* Wait 500 ms for bit 0 of PORT_SCR_STS to be set */
timeout = jiffies + msecs_to_jiffies(500); while (((readl(port->mmio + PORT_SCR_STAT) & 0x01) == 0)
&& time_before(jiffies, timeout))
;
if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &port->dd->dd_flag)) return;
/* Restart the port */
mdelay(20);
mtip_restart_port(port);
/* Trying to determine the cause of the error */
rv = mtip_read_log_page(dd->port, ATA_LOG_SATA_NCQ,
dd->port->log_buf,
dd->port->log_buf_dma, 1); if (rv) {
dev_warn(&dd->pdev->dev, "Error in READ LOG EXT (10h) command\n"); /* non-critical error, don't fail the load */
} else {
buf = (unsignedchar *)dd->port->log_buf; if (buf[259] & 0x1) {
dev_info(&dd->pdev->dev, "Write protect bit is set.\n");
set_bit(MTIP_DDF_WRITE_PROTECT_BIT, &dd->dd_flag);
fail_all_ncq_write = 1;
fail_reason = "write protect";
} if (buf[288] == 0xF7) {
dev_info(&dd->pdev->dev, "Exceeded Tmax, drive in thermal shutdown.\n");
set_bit(MTIP_DDF_OVER_TEMP_BIT, &dd->dd_flag);
fail_all_ncq_cmds = 1;
fail_reason = "thermal shutdown";
} if (buf[288] == 0xBF) {
set_bit(MTIP_DDF_REBUILD_FAILED_BIT, &dd->dd_flag);
dev_info(&dd->pdev->dev, "Drive indicates rebuild has failed. Secure erase required.\n");
fail_all_ncq_cmds = 1;
fail_reason = "rebuild failed";
}
}
/* clear the tag accumulator */
memset(tagaccum, 0, SLOTBITS_IN_LONGS * sizeof(long));
/* Loop through all the groups */ for (group = 0; group < dd->slot_groups; group++) { for (bit = 0; bit < 32; bit++) {
reissue = 1;
tag = (group << 5) + bit;
cmd = mtip_cmd_from_tag(dd, tag);
/* Acknowledge the interrupt status on the port.*/
port_stat = readl(port->mmio + PORT_IRQ_STAT); if (unlikely(port_stat == 0xFFFFFFFF)) {
mtip_check_surprise_removal(dd); return IRQ_HANDLED;
}
writel(port_stat, port->mmio + PORT_IRQ_STAT);
/* Demux port status */ if (likely(port_stat & PORT_IRQ_SDB_FIS)) {
do_irq_enable = 0;
WARN_ON_ONCE(atomic_read(&dd->irq_workers_active) != 0);
/* Start at 1: group zero is always local? */ for (i = 0, workers = 0; i < MTIP_MAX_SLOT_GROUPS;
i++) {
twork = &dd->work[i];
twork->completed = readl(port->completed[i]); if (twork->completed)
workers++;
}
atomic_set(&dd->irq_workers_active, workers); if (workers) { for (i = 1; i < MTIP_MAX_SLOT_GROUPS; i++) {
twork = &dd->work[i]; if (twork->completed)
queue_work_on(
twork->cpu_binding,
dd->isr_workq,
&twork->work);
}
if (likely(dd->work[0].completed))
mtip_workq_sdbfx(port, 0,
dd->work[0].completed);
} else { /* * Chip quirk: SDB interrupt but nothing * to complete
*/
do_irq_enable = 1;
}
}
if (unlikely(port_stat & PORT_IRQ_ERR)) { if (unlikely(mtip_check_surprise_removal(dd))) { /* don't proceed further */ return IRQ_HANDLED;
} if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT,
&dd->dd_flag)) return rv;
if (unlikely(port_stat & PORT_IRQ_LEGACY))
mtip_process_legacy(dd, port_stat & PORT_IRQ_LEGACY);
}
/* acknowledge interrupt */ if (unlikely(do_irq_enable))
writel(hba_stat, dd->mmio + HOST_IRQ_STAT);
return rv;
}
/* * HBA interrupt subroutine. * * @irq IRQ number. * @instance Pointer to the driver data structure. * * return value * IRQ_HANDLED A HBA interrupt was pending and handled. * IRQ_NONE This interrupt was not for the HBA.
*/ static irqreturn_t mtip_irq_handler(int irq, void *instance)
{ struct driver_data *dd = instance;
/* * Ignore s_active bit 0 of array element 0. * This bit will always be set
*/
active = readl(port->s_active[0]) & 0xFFFFFFFE; for (n = 1; n < port->dd->slot_groups; n++)
active |= readl(port->s_active[n]);
return active != 0;
}
/* * Wait for port to quiesce * * @port Pointer to port data structure * @timeout Max duration to wait (ms) * * return value * 0 Success * -EBUSY Commands still active
*/ staticint mtip_quiesce_io(struct mtip_port *port, unsignedlong timeout)
{ unsignedlong to; bool active = true;
blk_mq_quiesce_queue(port->dd->queue);
to = jiffies + msecs_to_jiffies(timeout); do { if (test_bit(MTIP_PF_SVC_THD_ACTIVE_BIT, &port->flags) &&
test_bit(MTIP_PF_ISSUE_CMDS_BIT, &port->flags)) {
msleep(20); continue; /* svc thd is actively issuing commands */
}
msleep(100);
if (mtip_check_surprise_removal(port->dd)) goto err_fault;
active = mtip_commands_active(port); if (!active) break;
} while (time_before(jiffies, to));
/* Parameters of an internal command, consumed by the completion path. */
struct mtip_int_cmd {
	int fis_len;		/* FIS length, in words */
	dma_addr_t buffer;	/* DMA address of the data buffer */
	int buf_len;		/* data buffer length, in bytes */
	u32 opts;		/* command header options */
};
/*
 * Execute an internal command and wait for the completion.
 *
 * @port    Pointer to the port data structure.
 * @fis     Pointer to the FIS that describes the command.
 * @fis_len Length in WORDS of the FIS.
 * @buffer  DMA accessible for command data.
 * @buf_len Length, in bytes, of the data buffer.
 * @opts    Command header options, excluding the FIS length
 *          and the number of PRD entries.
 * @timeout Time in ms to wait for the command to complete.
 *
 * return value
 *	0	 Command completed successfully.
 *	-EFAULT  The buffer address is not correctly aligned.
 *	-EBUSY   Internal command or other IO in progress.
 *	-EAGAIN  Time out waiting for command to complete.
 */
static int mtip_exec_internal_command(struct mtip_port *port,
					struct host_to_dev_fis *fis,
					int fis_len,
					dma_addr_t buffer,
					int buf_len,
					u32 opts,
					unsigned long timeout)
{
	struct mtip_cmd *int_cmd;
	struct driver_data *dd = port->dd;
	struct request *rq;
	struct mtip_int_cmd icmd = {
		.fis_len = fis_len,
		.buffer = buffer,
		.buf_len = buf_len,
		.opts = opts
	};
	int rv = 0;

	/* Make sure the buffer is 8 byte aligned. This is asic specific. */
	if (buffer & 0x00000007) {
		dev_err(&dd->pdev->dev, "SG buffer is not 8 byte aligned\n");
		return -EFAULT;
	}

	if (mtip_check_surprise_removal(dd))
		return -EFAULT;

	rq = blk_mq_alloc_request(dd->queue, REQ_OP_DRV_IN, BLK_MQ_REQ_RESERVED);
	if (IS_ERR(rq)) {
		dbg_printk(MTIP_DRV_NAME "Unable to allocate tag for PIO cmd\n");
		return -EFAULT;
	}

	set_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags);

	if (fis->command == ATA_CMD_SEC_ERASE_PREP)
		set_bit(MTIP_PF_SE_ACTIVE_BIT, &port->flags);

	clear_bit(MTIP_PF_DM_ACTIVE_BIT, &port->flags);

	if (fis->command != ATA_CMD_STANDBYNOW1) {
		/* wait for io to complete if non atomic */
		if (mtip_quiesce_io(port, MTIP_QUIESCE_IO_TIMEOUT_MS) < 0) {
			dev_warn(&dd->pdev->dev, "Failed to quiesce IO\n");
			blk_mq_free_request(rq);
			clear_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags);
			wake_up_interruptible(&port->svc_wait);
			return -EBUSY;
		}
	}

	/* Copy the command to the command table */
	int_cmd = blk_mq_rq_to_pdu(rq);
	int_cmd->icmd = &icmd;
	memcpy(int_cmd->command, fis, fis_len*4);

	rq->timeout = timeout;

	/* insert request and run queue */
	blk_execute_rq(rq, true);

	/*
	 * NOTE(review): the mangled source was missing the failure guard
	 * below, which left the device reset running unconditionally and
	 * the braces unbalanced. Restored the check of int_cmd->status
	 * (presumably set by the completion handler) — confirm against
	 * driver history.
	 */
	if (int_cmd->status) {
		rv = -EIO;

		if (mtip_check_surprise_removal(dd) ||
			test_bit(MTIP_DDF_REMOVE_PENDING_BIT,
					&dd->dd_flag)) {
			dev_err(&dd->pdev->dev,
				"Internal command [%02X] wait returned due to SR\n",
				fis->command);
			rv = -ENXIO;
			goto exec_ic_exit;
		}
		mtip_device_reset(dd); /* recover from timeout issue */
		rv = -EAGAIN;
		goto exec_ic_exit;
	}

	/* Command still pending in the hardware issue register. */
	if (readl(port->cmd_issue[MTIP_TAG_INDEX(MTIP_TAG_INTERNAL)])
			& (1 << MTIP_TAG_BIT(MTIP_TAG_INTERNAL))) {
		rv = -ENXIO;
		if (!test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag)) {
			mtip_device_reset(dd);
			rv = -EAGAIN;
		}
	}
exec_ic_exit:
	/* Clear the allocated and active bits for the internal command. */
	blk_mq_free_request(rq);
	clear_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags);
	if (rv >= 0 && mtip_pause_ncq(port, fis)) {
		/* NCQ paused */
		return rv;
	}

	wake_up_interruptible(&port->svc_wait);

	return rv;
}
/*
 * Byte-swap ATA ID strings.
 *
 * ATA identify data contains strings in byte-swapped 16-bit words.
 * They must be swapped (on all architectures) to be usable as C strings.
 * This function swaps bytes in-place.
 *
 * @buf The buffer location of the string
 * @len The number of bytes to swap
 *
 * return value
 *	None
 */
static inline void ata_swap_string(u16 *buf, unsigned int len)
{
	int i;

	/* len is in bytes; each iteration swaps one 16-bit word. */
	for (i = 0; i < (len/2); i++)
		be16_to_cpus(&buf[i]);
}
/*
 * Request the device identity information.
 *
 * If a user space buffer is not specified, i.e. is NULL, the
 * identify information is still read from the drive and placed
 * into the identify data buffer (@e port->identify) in the
 * port data structure.
 * When the identify buffer contains valid identify information @e
 * port->identify_valid is non-zero.
 *
 * @port        Pointer to the port structure.
 * @user_buffer A user space buffer where the identify data should be
 *              copied.
 *
 * return value
 *	0	Command completed successfully.
 *	-EFAULT	An error occurred while coping data to the user buffer.
 *	-1	Command failed.
 */
static int mtip_get_identify(struct mtip_port *port, void __user *user_buffer)
{
	int rv = 0;
	struct host_to_dev_fis fis;

	if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &port->dd->dd_flag))
		return -EFAULT;

	/*
	 * NOTE(review): the FIS-build and command-issue section below was
	 * truncated in the mangled source and has been restored from the
	 * function's documented contract — confirm against driver history.
	 */
	/* Build the IDENTIFY DEVICE FIS. */
	memset(&fis, 0, sizeof(struct host_to_dev_fis));
	fis.type	= 0x27;
	fis.opts	= 1 << 7;
	fis.command	= ATA_CMD_ID_ATA;

	/* Set the identify information as invalid. */
	port->identify_valid = 0;

	/* Clear the identify information. */
	memset(port->identify, 0, sizeof(u16) * ATA_ID_WORDS);

	/* Execute the command. */
	if (mtip_exec_internal_command(port,
				&fis,
				5,
				port->identify_dma,
				sizeof(u16) * ATA_ID_WORDS,
				0,
				MTIP_INT_CMD_TIMEOUT_MS)
				< 0) {
		rv = -1;
		goto out;
	}

	/*
	 * Perform any necessary byte-swapping. Yes, the kernel does in fact
	 * perform field-sensitive swapping on the string fields.
	 * See the kernel use of ata_id_string() for proof of this.
	 */
#ifdef __LITTLE_ENDIAN
	ata_swap_string(port->identify + 27, 40);  /* model string*/
	ata_swap_string(port->identify + 23, 8);   /* firmware string*/
	ata_swap_string(port->identify + 10, 20);  /* serial# string*/
#else
	{
		int i;

		for (i = 0; i < ATA_ID_WORDS; i++)
			port->identify[i] = le16_to_cpu(port->identify[i]);
	}
#endif

	/* Check security locked state */
	if (port->identify[128] & 0x4)
		set_bit(MTIP_DDF_SEC_LOCK_BIT, &port->dd->dd_flag);
	else
		clear_bit(MTIP_DDF_SEC_LOCK_BIT, &port->dd->dd_flag);

	/* Set the identify buffer as valid. */
	port->identify_valid = 1;

	if (user_buffer) {
		if (copy_to_user(
			user_buffer,
			port->identify,
			ATA_ID_WORDS * sizeof(u16))) {
			rv = -EFAULT;
			goto out;
		}
	}

out:
	return rv;
}
/* * Issue a standby immediate command to the device. * * @port Pointer to the port structure. * * return value * 0 Command was executed successfully. * -1 An error occurred while executing the command.
*/ staticint mtip_standby_immediate(struct mtip_port *port)
{ int rv; struct host_to_dev_fis fis; unsignedlong __maybe_unused start; unsignedint timeout;
/* * Issue a READ LOG EXT command to the device. * * @port pointer to the port structure. * @page page number to fetch * @buffer pointer to buffer * @buffer_dma dma address corresponding to @buffer * @sectors page length to fetch, in sectors * * return value * @rv return value from mtip_exec_internal_command()
*/ staticint mtip_read_log_page(struct mtip_port *port, u8 page, u16 *buffer,
dma_addr_t buffer_dma, unsignedint sectors)
{ struct host_to_dev_fis fis;
/* * Issue a SMART READ DATA command to the device. * * @port pointer to the port structure. * @buffer pointer to buffer * @buffer_dma dma address corresponding to @buffer * * return value * @rv return value from mtip_exec_internal_command()
*/ staticint mtip_get_smart_data(struct mtip_port *port, u8 *buffer,
dma_addr_t buffer_dma)
{ struct host_to_dev_fis fis;
/* * Get the value of a smart attribute * * @port pointer to the port structure * @id attribute number * @attrib pointer to return attrib information corresponding to @id * * return value * -EINVAL NULL buffer passed or unsupported attribute @id. * -EPERM Identify data not valid, SMART not supported or not enabled
*/ staticint mtip_get_smart_attr(struct mtip_port *port, unsignedint id, struct smart_attr *attrib)
{ int rv, i; struct smart_attr *pattr;
if (!attrib) return -EINVAL;
if (!port->identify_valid) {
dev_warn(&port->dd->pdev->dev, "IDENTIFY DATA not valid\n"); return -EPERM;
} if (!(port->identify[82] & 0x1)) {
dev_warn(&port->dd->pdev->dev, "SMART not supported\n"); return -EPERM;
} if (!(port->identify[85] & 0x1)) {
dev_warn(&port->dd->pdev->dev, "SMART not enabled\n"); return -EPERM;
}
memset(port->smart_buf, 0, ATA_SECT_SIZE);
rv = mtip_get_smart_data(port, port->smart_buf, port->smart_buf_dma); if (rv) {
dev_warn(&port->dd->pdev->dev, "Failed to ge SMART data\n"); return rv;
}
pattr = (struct smart_attr *)(port->smart_buf + 2); for (i = 0; i < 29; i++, pattr++) if (pattr->attr_id == id) {
memcpy(attrib, pattr, sizeof(struct smart_attr)); break;
}
if (i == 29) {
dev_warn(&port->dd->pdev->dev, "Query for invalid SMART attribute ID\n");
rv = -EINVAL;
}
return rv;
}
/* * Get the drive capacity. * * @dd Pointer to the device data structure. * @sectors Pointer to the variable that will receive the sector count. * * return value * 1 Capacity was returned successfully. * 0 The identify information is invalid.
*/ staticbool mtip_hw_get_capacity(struct driver_data *dd, sector_t *sectors)
{ struct mtip_port *port = dd->port;
u64 total, raw0, raw1, raw2, raw3;
raw0 = port->identify[100];
raw1 = port->identify[101];
raw2 = port->identify[102];
raw3 = port->identify[103];
total = raw0 | raw1<<16 | raw2<<32 | raw3<<48;
*sectors = total; return (bool) !!port->identify_valid;
}
/* * Display the identify command data. * * @port Pointer to the port data structure. * * return value * None
*/ staticvoid mtip_dump_identify(struct mtip_port *port)
{
sector_t sectors; unsignedshort revid; char cbuf[42];
/* * Map the commands scatter list into the command table. * * @command Pointer to the command. * @nents Number of scatter list entries. * * return value * None
*/ staticinlinevoid fill_command_sg(struct driver_data *dd, struct mtip_cmd *command, int nents)
{ int n; unsignedint dma_len; struct mtip_cmd_sg *command_sg; struct scatterlist *sg;
/* * @brief Execute a drive command. * * @param port Pointer to the port data structure. * @param command Pointer to the user specified command parameters. * @param user_buffer Pointer to the user space buffer where read sector * data should be copied. * * return value 0 The command completed successfully. * return value -EFAULT An error occurred while copying the completion * data to the user space buffer. * return value -1 An error occurred while executing the command.
*/ staticint exec_drive_command(struct mtip_port *port, u8 *command, void __user *user_buffer)
{ struct host_to_dev_fis fis; struct host_to_dev_fis *reply;
u8 *buf = NULL;
dma_addr_t dma_addr = 0; int rv = 0, xfer_sz = command[3]; unsignedint to;
if (xfer_sz) { if (copy_to_user(user_buffer,
buf,
ATA_SECT_SIZE * command[3])) {
rv = -EFAULT; goto exit_drive_command;
}
}
exit_drive_command: if (buf)
dma_free_coherent(&port->dd->pdev->dev,
ATA_SECT_SIZE * xfer_sz, buf, dma_addr); return rv;
}
/* * Indicates whether a command has a single sector payload. * * @command passed to the device to perform the certain event. * @features passed to the device to perform the certain event. * * return value * 1 command is one that always has a single sector payload, * regardless of the value in the Sector Count field. * 0 otherwise *
*/ staticunsignedint implicit_sector(unsignedchar command, unsignedchar features)
{ unsignedint rv = 0;
/* list of commands that have an implicit sector count of 1 */ switch (command) { case ATA_CMD_SEC_SET_PASS: case ATA_CMD_SEC_UNLOCK: case ATA_CMD_SEC_ERASE_PREP: case ATA_CMD_SEC_ERASE_UNIT: case ATA_CMD_SEC_FREEZE_LOCK: case ATA_CMD_SEC_DISABLE_PASS: case ATA_CMD_PMP_READ: case ATA_CMD_PMP_WRITE:
rv = 1; break; case ATA_CMD_SET_MAX: if (features == ATA_SET_MAX_UNLOCK)
rv = 1; break; case ATA_CMD_SMART: if ((features == ATA_SMART_READ_VALUES) ||
(features == ATA_SMART_READ_THRESHOLDS))
rv = 1; break; case ATA_CMD_CONF_OVERLAY: if ((features == ATA_DCO_IDENTIFY) ||
(features == ATA_DCO_SET))
rv = 1; break;
} return rv;
}
if (taskout) { if (copy_to_user(buf + outtotal, outbuf, taskout)) {
err = -EFAULT; goto abort;
}
} if (taskin) { if (copy_to_user(buf + intotal, inbuf, taskin)) {
err = -EFAULT; goto abort;
}
}
abort: if (inbuf_dma)
dma_unmap_single(&dd->pdev->dev, inbuf_dma, taskin,
DMA_FROM_DEVICE); if (outbuf_dma)
dma_unmap_single(&dd->pdev->dev, outbuf_dma, taskout,
DMA_TO_DEVICE);
kfree(outbuf);
kfree(inbuf);
return err;
}
/* * Handle IOCTL calls from the Block Layer. * * This function is called by the Block Layer when it receives an IOCTL * command that it does not understand. If the IOCTL command is not supported * this function returns -ENOTTY. * * @dd Pointer to the driver data structure. * @cmd IOCTL command passed from the Block Layer. * @arg IOCTL argument passed from the Block Layer. * * return value * 0 The IOCTL completed successfully. * -ENOTTY The specified command is not supported. * -EFAULT An error occurred copying data to a user space buffer. * -EIO An error occurred while executing the command.
*/ staticint mtip_hw_ioctl(struct driver_data *dd, unsignedint cmd, unsignedlong arg)
{ switch (cmd) { case HDIO_GET_IDENTITY:
{ if (copy_to_user((void __user *)arg, dd->port->identify, sizeof(u16) * ATA_ID_WORDS)) return -EFAULT; break;
} case HDIO_DRIVE_CMD:
{
u8 drive_command[4];
/* Copy the user command info to our buffer. */ if (copy_from_user(drive_command,
(void __user *) arg, sizeof(drive_command))) return -EFAULT;
/* Execute the drive command. */ if (exec_drive_command(dd->port,
drive_command,
(void __user *) (arg+4))) return -EIO;
/* Copy the status back to the users buffer. */ if (copy_to_user((void __user *) arg,
drive_command, sizeof(drive_command))) return -EFAULT;
break;
} case HDIO_DRIVE_TASK:
{
u8 drive_command[7];
/* Copy the user command info to our buffer. */ if (copy_from_user(drive_command,
(void __user *) arg, sizeof(drive_command))) return -EFAULT;
/* Execute the drive command. */ if (exec_drive_task(dd->port, drive_command)) return -EIO;
/* Copy the status back to the users buffer. */ if (copy_to_user((void __user *) arg,
drive_command, sizeof(drive_command))) return -EFAULT;
break;
} case HDIO_DRIVE_TASKFILE: {
ide_task_request_t req_task; int ret, outtotal;
if (copy_from_user(&req_task, (void __user *) arg, sizeof(req_task))) return -EFAULT;
outtotal = sizeof(req_task);
ret = exec_drive_taskfile(dd, (void __user *) arg,
&req_task, outtotal);
if (copy_to_user((void __user *) arg, &req_task, sizeof(req_task))) return -EFAULT;
return ret;
}
default: return -EINVAL;
} return 0;
}
/* * Submit an IO to the hw * * This function is called by the block layer to issue an io * to the device. Upon completion, the callback function will * be called with the data parameter passed as the callback data. * * @dd Pointer to the driver data structure. * @start First sector to read. * @nsect Number of sectors to read. * @tag The tag of this read command. * @callback Pointer to the function that should be called * when the read completes. * @data Callback data passed to the callback function * when the read completes. * @dir Direction (read or write) * * return value * 0 The IO completed successfully. * -ENOMEM The DMA mapping failed.
*/ staticint mtip_hw_submit_io(struct driver_data *dd, struct request *rq, struct mtip_cmd *command, struct blk_mq_hw_ctx *hctx)
{ struct mtip_cmd_hdr *hdr =
dd->port->command_list + sizeof(struct mtip_cmd_hdr) * rq->tag; struct host_to_dev_fis *fis; struct mtip_port *port = dd->port; int dma_dir = rq_data_dir(rq) == READ ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
u64 start = blk_rq_pos(rq); unsignedint nsect = blk_rq_sectors(rq); unsignedint nents;
/* Map the scatter list for DMA access */
command->scatter_ents = blk_rq_map_sg(rq, command->sg);
nents = dma_map_sg(&dd->pdev->dev, command->sg,
command->scatter_ents, dma_dir); if (!nents) return -ENOMEM;
prefetch(&port->flags);
/* * The number of retries for this command before it is * reported as a failure to the upper layers.
*/
command->retries = MTIP_MAX_RETRIES;
/* * To prevent this command from being issued * if an internal command is in progress or error handling is active.
*/ if (unlikely(port->flags & MTIP_PF_PAUSE_IO)) {
set_bit(rq->tag, port->cmds_to_issue);
set_bit(MTIP_PF_ISSUE_CMDS_BIT, &port->flags); return 0;
}
/* Issue the command to the hardware */
mtip_issue_ncq_command(port, rq->tag);
return 0;
}
/* * Sysfs status dump. * * @dev Pointer to the device structure, passed by the kernrel. * @attr Pointer to the device_attribute structure passed by the kernel. * @buf Pointer to the char buffer that will receive the stats info. * * return value * The size, in bytes, of the data copied into buf.
*/ static ssize_t mtip_hw_show_status(struct device *dev, struct device_attribute *attr, char *buf)
{ struct driver_data *dd = dev_to_disk(dev)->private_data; int size = 0;
/* * Detect the details of the product, and store anything needed * into the driver data structure. This includes product type and * version and number of slot groups. * * @dd Pointer to the driver data structure. * * return value * None
*/ staticvoid mtip_detect_product(struct driver_data *dd)
{
u32 hwdata; unsignedint rev, slotgroups;
/* * HBA base + 0xFC [15:0] - vendor-specific hardware interface * info register: * [15:8] hardware/software interface rev# * [ 3] asic-style interface * [ 2:0] number of slot groups, minus 1 (only valid for asic-style).
*/
hwdata = readl(dd->mmio + HOST_HSORG);
while (1) { if (kthread_should_stop() ||
test_bit(MTIP_PF_SVC_THD_STOP_BIT, &port->flags)) goto st_out;
clear_bit(MTIP_PF_SVC_THD_ACTIVE_BIT, &port->flags);
/* * the condition is to check neither an internal command is * is in progress nor error handling is active
*/
wait_event_interruptible(port->svc_wait, (port->flags) &&
(port->flags & MTIP_PF_SVC_THD_WORK));
if (kthread_should_stop() ||
test_bit(MTIP_PF_SVC_THD_STOP_BIT, &port->flags)) goto st_out;
if (unlikely(test_bit(MTIP_DDF_REMOVE_PENDING_BIT,
&dd->dd_flag))) goto st_out;
/*
 * NOTE(review): the following disclaimer is extraneous website boilerplate
 * (originally German) that was accidentally appended to this source file and
 * should be removed. English translation preserved for reference:
 * "The information on this web page was compiled carefully and to the best
 * of our knowledge. However, neither completeness, nor correctness, nor
 * quality of the provided information is guaranteed.
 * Remark: the colored syntax highlighting and the measurement are still
 * experimental."
 */