// SPDX-License-Identifier: GPL-2.0+ /* * IBM Power Systems Virtual Management Channel Support. * * Copyright (c) 2004, 2018 IBM Corp. * Dave Engebretsen engebret@us.ibm.com * Steven Royer seroyer@linux.vnet.ibm.com * Adam Reznechek adreznec@linux.vnet.ibm.com * Bryant G. Ly <bryantly@linux.vnet.ibm.com>
*/
/**
 * ibmvmc_reset_crq_queue - Reset CRQ Queue
 *
 * @adapter:	crq_server_adapter struct
 *
 * This function calls h_free_crq and then calls H_REG_CRQ and does all the
 * bookkeeping to get us back to where we can communicate.
 *
 * Return:
 *	0 - Success
 *	Non-Zero - Failure
 */
static int ibmvmc_reset_crq_queue(struct crq_server_adapter *adapter)
{
	struct vio_dev *vdev = to_vio_dev(adapter->dev);
	struct crq_queue *queue = &adapter->queue;
	int rc = 0;

	/* Close the CRQ */
	h_free_crq(vdev->unit_address);

	/* Clean out the queue */
	memset(queue->msgs, 0x00, PAGE_SIZE);
	queue->cur = 0;

	/* And re-open it again */
	rc = plpar_hcall_norets(H_REG_CRQ,
				vdev->unit_address,
				queue->msg_token, PAGE_SIZE);
	if (rc == 2)
		/* Adapter is good, but other end is not ready */
		dev_warn(adapter->dev, "Partner adapter not ready\n");
	else if (rc != 0)
		dev_err(adapter->dev, "couldn't register crq--rc 0x%x\n", rc);

	return rc;
}
/** * crq_queue_next_crq: - Returns the next entry in message queue * @queue: crq_queue to use * * Returns pointer to next entry in queue, or NULL if there are no new * entried in the CRQ.
*/ staticstruct ibmvmc_crq_msg *crq_queue_next_crq(struct crq_queue *queue)
{ struct ibmvmc_crq_msg *crq; unsignedlong flags;
spin_lock_irqsave(&queue->lock, flags);
crq = &queue->msgs[queue->cur]; if (crq->valid & 0x80) { if (++queue->cur == queue->size)
queue->cur = 0;
/* Ensure the read of the valid bit occurs before reading any * other bits of the CRQ entry
*/
dma_rmb();
} else {
crq = NULL;
}
/* * Ensure the command buffer is flushed to memory before handing it * over to the other side to prevent it from fetching any stale data.
*/
dma_wmb();
rc = plpar_hcall_norets(H_SEND_CRQ, vdev->unit_address, word1, word2);
dev_dbg(adapter->dev, "rc = 0x%lx\n", rc);
return rc;
}
/**
 * alloc_dma_buffer - Create DMA Buffer
 *
 * @vdev:	vio_dev struct
 * @size:	Size field
 * @dma_handle:	DMA address field
 *
 * Allocates memory for the command queue and maps remote memory into an
 * ioba.
 *
 * Returns a pointer to the buffer, or NULL on failure (in which case
 * *dma_handle is set to 0).
 */
static void *alloc_dma_buffer(struct vio_dev *vdev, size_t size,
			      dma_addr_t *dma_handle)
{
	/* allocate memory */
	void *buffer = kzalloc(size, GFP_ATOMIC);

	/* NOTE(review): the extract was truncated after the kzalloc; the
	 * allocation check, DMA mapping, and returns below are restored per
	 * the upstream ibmvmc driver.
	 */
	if (!buffer) {
		*dma_handle = 0;
		return NULL;
	}

	/* DMA map */
	*dma_handle = dma_map_single(&vdev->dev, buffer, size,
				     DMA_BIDIRECTIONAL);

	if (dma_mapping_error(&vdev->dev, *dma_handle)) {
		*dma_handle = 0;
		kfree(buffer);
		return NULL;
	}

	return buffer;
}
/**
 * ibmvmc_send_open - Interface Open
 * @buffer: Pointer to ibmvmc_buffer struct
 * @hmc: Pointer to ibmvmc_hmc struct
 *
 * This command is sent by the management partition as the result of a
 * management partition device request. It causes the hypervisor to
 * prepare a set of data buffers for the management application connection
 * indicated HMC idx. A unique HMC Idx would be used if multiple management
 * applications running concurrently were desired. Before responding to this
 * command, the hypervisor must provide the management partition with at
 * least one of these new buffers via the Add Buffer. This indicates whether
 * the messages are inbound or outbound from the hypervisor.
 *
 * Return:
 * 0 - Success
 * Non-zero - Failure
 */
/* NOTE(review): 'staticint' below lost its whitespace during extraction
 * ('static int'); must be re-spaced before this compiles.
 */
staticint ibmvmc_send_open(struct ibmvmc_buffer *buffer, struct ibmvmc_hmc *hmc)
{ struct ibmvmc_crq_msg crq_msg; struct crq_server_adapter *adapter;
__be64 *crq_as_u64 = (__be64 *)&crq_msg; int rc = 0;
/* NOTE(review): function body is truncated here -- the hmc/adapter
 * validation, buffer RDMA, CRQ message construction and send, and the
 * return statement are missing from this extract. Restore from the
 * upstream ibmvmc driver before use.
 */
/**
 * ibmvmc_send_close - Interface Close
 * @hmc: Pointer to ibmvmc_hmc struct
 *
 * This command is sent by the management partition to terminate a
 * management application to hypervisor connection. When this command is
 * sent, the management partition has quiesced all I/O operations to all
 * buffers associated with this management application connection, and
 * has freed any storage for these buffers.
 *
 * Return:
 * 0 - Success
 * Non-zero - Failure
 */
/* NOTE(review): 'staticint' lost its whitespace during extraction. */
staticint ibmvmc_send_close(struct ibmvmc_hmc *hmc)
{ struct ibmvmc_crq_msg crq_msg; struct crq_server_adapter *adapter;
__be64 *crq_as_u64 = (__be64 *)&crq_msg; int rc = 0;
/* NOTE(review): function body is truncated here -- the CRQ close message
 * construction/send and return are missing from this extract. Restore from
 * the upstream ibmvmc driver before use.
 */
/**
 * ibmvmc_send_capabilities - Send VMC Capabilities
 *
 * @adapter:	crq_server_adapter struct
 *
 * The capabilities message is an administrative message sent after the CRQ
 * initialization sequence of messages and is used to exchange VMC capabilities
 * between the management partition and the hypervisor. The management
 * partition must send this message and the hypervisor must respond with VMC
 * capabilities Response message before HMC interface message can begin. Any
 * HMC interface messages received before the exchange of capabilities has
 * complete are dropped.
 *
 * Return:
 *	0 - Success
 */
/* NOTE(review): 'staticint' lost its whitespace during extraction. */
staticint ibmvmc_send_capabilities(struct crq_server_adapter *adapter)
{ struct ibmvmc_admin_crq_msg crq_msg;
__be64 *crq_as_u64 = (__be64 *)&crq_msg;
/* NOTE(review): function body is truncated here -- the capabilities message
 * construction/send and return are missing from this extract. Restore from
 * the upstream ibmvmc driver before use.
 */
/**
 * ibmvmc_send_add_buffer_resp - Add Buffer Response
 *
 * @adapter:	crq_server_adapter struct
 * @status:	Status field
 * @hmc_session: HMC Session field
 * @hmc_index:	HMC Index field
 * @buffer_id:	Buffer Id field
 *
 * This command is sent by the management partition to the hypervisor in
 * response to the Add Buffer message. The Status field indicates the result of
 * the command.
 *
 * Return:
 *	0 - Success
 */
/* NOTE(review): 'staticint' lost its whitespace during extraction. */
staticint ibmvmc_send_add_buffer_resp(struct crq_server_adapter *adapter,
u8 status, u8 hmc_session,
u8 hmc_index, u16 buffer_id)
{ struct ibmvmc_crq_msg crq_msg;
__be64 *crq_as_u64 = (__be64 *)&crq_msg;
/* NOTE(review): function body is truncated here -- the response message
 * construction/send and return are missing from this extract. Restore from
 * the upstream ibmvmc driver before use.
 */
/**
 * ibmvmc_send_rem_buffer_resp - Remove Buffer Response
 *
 * @adapter:	crq_server_adapter struct
 * @status:	Status field
 * @hmc_session: HMC Session field
 * @hmc_index:	HMC Index field
 * @buffer_id:	Buffer Id field
 *
 * This command is sent by the management partition to the hypervisor in
 * response to the Remove Buffer message. The Buffer ID field indicates
 * which buffer the management partition selected to remove. The Status
 * field indicates the result of the command.
 *
 * Return:
 *	0 - Success
 */
/* NOTE(review): 'staticint' lost its whitespace during extraction. */
staticint ibmvmc_send_rem_buffer_resp(struct crq_server_adapter *adapter,
u8 status, u8 hmc_session,
u8 hmc_index, u16 buffer_id)
{ struct ibmvmc_crq_msg crq_msg;
__be64 *crq_as_u64 = (__be64 *)&crq_msg;
/* NOTE(review): function body is truncated here -- the response message
 * construction/send and return are missing from this extract. Restore from
 * the upstream ibmvmc driver before use.
 */
/**
 * ibmvmc_send_msg - Signal Message
 *
 * @adapter:	crq_server_adapter struct
 * @buffer:	ibmvmc_buffer struct
 * @hmc:	ibmvmc_hmc struct
 * @msg_len:	message length field
 *
 * This command is sent between the management partition and the hypervisor
 * in order to signal the arrival of an HMC protocol message. The command
 * can be sent by both the management partition and the hypervisor. It is
 * used for all traffic between the management application and the hypervisor,
 * regardless of who initiated the communication.
 *
 * There is no response to this message.
 *
 * Return:
 *	0 - Success
 *	Non-zero - Failure
 */
/* NOTE(review): 'staticint' lost its whitespace during extraction. */
staticint ibmvmc_send_msg(struct crq_server_adapter *adapter, struct ibmvmc_buffer *buffer, struct ibmvmc_hmc *hmc, int msg_len)
{ struct ibmvmc_crq_msg crq_msg;
__be64 *crq_as_u64 = (__be64 *)&crq_msg; int rc = 0;
dev_dbg(adapter->dev, "CRQ send: rdma to HV\n");
/* Copy msg_len bytes from the local DMA buffer to the remote (hypervisor)
 * side before signalling the message.
 */
rc = h_copy_rdma(msg_len,
adapter->liobn,
buffer->dma_addr_local,
adapter->riobn,
buffer->dma_addr_remote); if (rc) {
dev_err(adapter->dev, "Error in send_msg, h_copy_rdma rc 0x%x\n",
rc); return rc;
}
/* NOTE(review): function is truncated here -- the CRQ signal-message
 * construction/send and final return are missing from this extract.
 * Restore from the upstream ibmvmc driver before use.
 */
/* NOTE(review): headerless fragment -- this appears to be the interior of a
 * chrdev write handler (uses file->private_data, copy_from_user, count/ppos
 * style locals declared outside this extract). The enclosing function
 * signature, local declarations, the send path after the copy loop, and the
 * 'out' unlock label are all missing. Do not compile as-is.
 */
session = file->private_data; if (!session) return -EIO;
hmc = session->hmc; if (!hmc) return -EIO;
spin_lock_irqsave(&hmc->lock, flags); if (hmc->state == ibmhmc_state_free) { /* HMC connection is not valid (possibly was reset under us). */
ret = -EIO; goto out;
}
adapter = hmc->adapter; if (!adapter) {
ret = -EIO; goto out;
}
/* Reject writes larger than the negotiated MTU. */
if (count > ibmvmc.max_mtu) {
dev_warn(adapter->dev, "invalid buffer size 0x%lx\n",
(unsignedlong)count);
ret = -EIO; goto out;
}
/* Waiting for the open resp message to the ioctl(1) - retry */ if (hmc->state == ibmhmc_state_opening) {
ret = -EBUSY; goto out;
}
/* Make sure the ioctl() was called & the open msg sent, and that * the HMC connection has not failed.
*/ if (hmc->state != ibmhmc_state_ready) {
ret = -EIO; goto out;
}
vmc_buffer = ibmvmc_get_valid_hmc_buffer(hmc->index); if (!vmc_buffer) { /* No buffer available for the msg send, or we have not yet * completed the open/open_resp sequence. Retry until this is * complete.
*/
ret = -EBUSY; goto out;
} if (!vmc_buffer->real_addr_local) {
dev_err(adapter->dev, "no buffer storage assigned\n");
ret = -EIO; goto out;
}
/* Copy the user payload into the local buffer, chunked by buffer size. */
buf = vmc_buffer->real_addr_local;
while (c > 0) {
bytes = min_t(size_t, c, vmc_buffer->size);
bytes -= copy_from_user(buf, p, bytes); if (!bytes) {
ret = -EFAULT; goto out;
}
c -= bytes;
p += bytes;
} if (p == buffer) goto out;
/* NOTE(review): headerless fragment -- this appears to be the interior of
 * the set-HMC-ID ioctl path. The enclosing function signature, local
 * declarations (index, valid, free, buffer, print_buffer, rc), the copy of
 * the user-supplied HMC ID, and the code following ibmvmc_send_open are
 * missing from this extract. Do not compile as-is.
 */
if (ibmvmc.state < ibmvmc_state_ready) {
pr_warn("ibmvmc: Reserve HMC: not state_ready\n"); return -EAGAIN;
}
/* Device is busy until capabilities have been exchanged and we * have a generic buffer for each possible HMC connection.
*/ for (index = 0; index <= ibmvmc.max_hmc_index; index++) {
valid = 0;
ibmvmc_count_hmc_buffers(index, &valid, &free); if (valid == 0) {
pr_warn("ibmvmc: buffers not ready for index %d\n",
index); return -ENOBUFS;
}
}
/* Get an hmc object, and transition to ibmhmc_state_initial */
hmc = ibmvmc_get_free_hmc(); if (!hmc) {
pr_warn("%s: free hmc not found\n", __func__); return -EBUSY;
}
if (!buffer || !buffer->real_addr_local) {
pr_warn("ibmvmc: sethmcid: no buffer available\n"); return -EIO;
}
/* Log the ID through a bounded, NUL-terminated copy. */
strscpy(print_buffer, hmc->hmc_id, sizeof(print_buffer));
pr_info("ibmvmc: sethmcid: Set HMC ID: \"%s\"\n", print_buffer);
memcpy(buffer->real_addr_local, hmc->hmc_id, HMC_ID_LEN); /* RDMA over ID, send open msg, change state to ibmhmc_state_opening */
rc = ibmvmc_send_open(buffer, hmc);
/**
 * ibmvmc_add_buffer - Add Buffer
 *
 * @adapter: crq_server_adapter struct
 * @crq: ibmvmc_crq_msg struct
 *
 * This message transfers a buffer from hypervisor ownership to management
 * partition ownership. The LIOBA is obtained from the virtual TCE table
 * associated with the hypervisor side of the VMC device, and points to a
 * buffer of size MTU (as established in the capabilities exchange).
 *
 * Typical flow for ading buffers:
 * 1. A new management application connection is opened by the management
 *	partition.
 * 2. The hypervisor assigns new buffers for the traffic associated with
 *	that connection.
 * 3. The hypervisor sends VMC Add Buffer messages to the management
 *	partition, informing it of the new buffers.
 * 4. The hypervisor sends an HMC protocol message (to the management
 *	application) notifying it of the new buffers. This informs the
 *	application that it has buffers available for sending HMC
 *	commands.
 *
 * Return:
 *	0 - Success
 *	Non-zero - Failure
 */
/* NOTE(review): 'staticint' lost its whitespace during extraction. */
staticint ibmvmc_add_buffer(struct crq_server_adapter *adapter, struct ibmvmc_crq_msg *crq)
{ struct ibmvmc_buffer *buffer;
u8 hmc_index;
u8 hmc_session;
u16 buffer_id; unsignedlong flags; int rc = 0;
/* NOTE(review): the middle of this function is missing from the extract --
 * 'buffer' is used below without ever being assigned (the crq validation,
 * index checks, locking, and buffer lookup/allocation are gone). Restore
 * from the upstream ibmvmc driver before use.
 */
buffer->dma_addr_remote = be32_to_cpu(crq->var3.lioba);
buffer->size = ibmvmc.max_mtu;
buffer->owner = crq->var1.owner;
buffer->free = 1; /* Must ensure valid==1 is observable only after all other fields are */
dma_wmb();
buffer->valid = 1;
buffer->id = buffer_id;
/**
 * ibmvmc_rem_buffer - Remove Buffer
 *
 * @adapter: crq_server_adapter struct
 * @crq: ibmvmc_crq_msg struct
 *
 * This message requests an HMC buffer to be transferred from management
 * partition ownership to hypervisor ownership. The management partition may
 * not be able to satisfy the request at a particular point in time if all its
 * buffers are in use. The management partition requires a depth of at least
 * one inbound buffer to allow management application commands to flow to the
 * hypervisor. It is, therefore, an interface error for the hypervisor to
 * attempt to remove the management partition's last buffer.
 *
 * The hypervisor is expected to manage buffer usage with the management
 * application directly and inform the management partition when buffers may be
 * removed. The typical flow for removing buffers:
 *
 * 1. The management application no longer needs a communication path to a
 *	particular hypervisor function. That function is closed.
 * 2. The hypervisor and the management application quiesce all traffic to that
 *	function. The hypervisor requests a reduction in buffer pool size.
 * 3. The management application acknowledges the reduction in buffer pool size.
 * 4. The hypervisor sends a Remove Buffer message to the management partition,
 *	informing it of the reduction in buffers.
 * 5. The management partition verifies it can remove the buffer. This is
 *	possible if buffers have been quiesced.
 *
 * Return:
 *	0 - Success
 *	Non-zero - Failure
 */
/*
 * The hypervisor requested that we pick an unused buffer, and return it.
 * Before sending the buffer back, we free any storage associated with the
 * buffer.
 */
/* NOTE(review): 'staticint' lost its whitespace during extraction. */
staticint ibmvmc_rem_buffer(struct crq_server_adapter *adapter, struct ibmvmc_crq_msg *crq)
{ struct ibmvmc_buffer *buffer;
u8 hmc_index;
u8 hmc_session;
u16 buffer_id = 0; unsignedlong flags; int rc = 0;
/* NOTE(review): function body is truncated here -- the buffer selection,
 * free, response send, and return are missing from this extract. Restore
 * from the upstream ibmvmc driver before use.
 */
/* NOTE(review): headerless fragment -- this appears to be the interior of a
 * receive-message path (hmc, buffer_id, msg_len, flags, and the hmc->lock
 * acquisition are declared/taken outside this extract). The closing lines
 * L185-L188 (a session-validation check using hmc_index/crq) look spliced
 * in from a different function. Do not compile as-is.
 */
if (hmc->state == ibmhmc_state_free) {
dev_err(adapter->dev, "Recv_msg: invalid hmc state = 0x%x\n",
hmc->state); /* HMC connection is not valid (possibly was reset under us). */
spin_unlock_irqrestore(&hmc->lock, flags); return -1;
}
buffer = &hmc->buffer[buffer_id];
/* Only accept buffers the hypervisor currently owns and has published. */
if (buffer->valid == 0 || buffer->owner == VMC_BUF_OWNER_ALPHA) {
dev_err(adapter->dev, "Recv_msg: not valid, or not HV. 0x%x 0x%x\n",
buffer->valid, buffer->owner);
spin_unlock_irqrestore(&hmc->lock, flags); return -1;
}
/* RDMA the data into the partition. */
rc = h_copy_rdma(msg_len,
adapter->riobn,
buffer->dma_addr_remote,
adapter->liobn,
buffer->dma_addr_local);
if (rc) {
dev_err(adapter->dev, "Failure in recv_msg: h_copy_rdma = 0x%x\n",
rc);
spin_unlock_irqrestore(&hmc->lock, flags); return -1;
}
/* Must be locked because read operates on the same data */
hmc->queue_outbound_msgs[hmc->queue_head] = buffer_id;
hmc->queue_head++; if (hmc->queue_head == ibmvmc_max_buf_pool_size)
hmc->queue_head = 0;
if (hmc->queue_head == hmc->queue_tail)
dev_err(adapter->dev, "outbound buffer queue wrapped.\n");
/* NOTE(review): the session check below appears spliced in from another
 * function; the unlock/wake-up that should end the recv path is missing.
 */
if (hmcs[hmc_index].session != crq->hmc_session) {
dev_warn(adapter->dev, "Drop, bad session: expected 0x%x, recv 0x%x\n",
hmcs[hmc_index].session, crq->hmc_session); return -1;
}
return 0;
}
/**
 * ibmvmc_reset - Reset
 *
 * @adapter:	crq_server_adapter struct
 * @xport_event:	export_event field
 *
 * Closes all HMC sessions and conditionally schedules a CRQ reset.
 * @xport_event: If true, the partner closed their CRQ; we don't need to reset.
 *               If false, we need to schedule a CRQ reset.
 */
static void ibmvmc_reset(struct crq_server_adapter *adapter, bool xport_event)
{
	int i;

	if (ibmvmc.state != ibmvmc_state_sched_reset) {
		dev_info(adapter->dev, "*** Reset to initial state.\n");
		for (i = 0; i < ibmvmc_max_hmcs; i++)
			ibmvmc_return_hmc(&hmcs[i], xport_event);

		if (xport_event) {
			/* CRQ was closed by the partner.  We don't need to do
			 * anything except set ourself to the correct state to
			 * handle init msgs.
			 */
			ibmvmc.state = ibmvmc_state_crqinit;
		} else {
			/* The partner did not close their CRQ - instead, we're
			 * closing the CRQ on our end. Need to schedule this
			 * for process context, because CRQ reset may require a
			 * sleep.
			 *
			 * Setting ibmvmc.state here immediately prevents
			 * ibmvmc_open from completing until the reset
			 * completes in process context.
			 */
			ibmvmc.state = ibmvmc_state_sched_reset;
			dev_dbg(adapter->dev, "Device reset scheduled");
			wake_up_interruptible(&adapter->reset_wait_queue);
		}
	}
}
/**
 * ibmvmc_reset_task - Reset Task
 *
 * @data: Data field
 *
 * Performs a CRQ reset of the VMC device in process context.
 * NOTE: This function should not be called directly, use ibmvmc_reset.
 */
/* NOTE(review): 'staticint' lost its whitespace during extraction. */
staticint ibmvmc_reset_task(void *data)
{ struct crq_server_adapter *adapter = data; int rc;
set_user_nice(current, -20);
while (!kthread_should_stop()) {
/* Sleep until a reset is scheduled or the thread is told to stop. */
wait_event_interruptible(adapter->reset_wait_queue,
(ibmvmc.state == ibmvmc_state_sched_reset) ||
kthread_should_stop());
if (kthread_should_stop()) break;
dev_dbg(adapter->dev, "CRQ resetting in process context");
tasklet_disable(&adapter->work_task);
/* NOTE(review): function is truncated here -- the actual CRQ reset call,
 * state transition, tasklet re-enable, loop close, and return are missing
 * from this extract. Restore from the upstream ibmvmc driver before use.
 */
/**
 * ibmvmc_process_open_resp - Process Open Response
 *
 * @crq: ibmvmc_crq_msg struct
 * @adapter: crq_server_adapter struct
 *
 * This command is sent by the hypervisor in response to the Interface
 * Open message. When this message is received, the indicated buffer is
 * again available for management partition use.
 */
/* NOTE(review): 'staticvoid' lost its whitespace during extraction. */
staticvoid ibmvmc_process_open_resp(struct ibmvmc_crq_msg *crq, struct crq_server_adapter *adapter)
{ unsignedchar hmc_index; unsignedshort buffer_id;
hmc_index = crq->hmc_index; if (hmc_index > ibmvmc.max_hmc_index) { /* Why would PHYP give an index > max negotiated? */
ibmvmc_reset(adapter, false); return;
}
if (crq->status) {
dev_warn(adapter->dev, "open_resp: failed - status 0x%x\n",
crq->status);
ibmvmc_return_hmc(&hmcs[hmc_index], false); return;
}
/* NOTE(review): function is truncated here -- the success path (buffer
 * release and HMC state transition to ready) is missing from this
 * extract. Restore from the upstream ibmvmc driver before use.
 */
/**
 * ibmvmc_process_close_resp - Process Close Response
 *
 * @crq: ibmvmc_crq_msg struct
 * @adapter: crq_server_adapter struct
 *
 * This command is sent by the hypervisor in response to the managemant
 * application Interface Close message.
 *
 * If the close fails, simply reset the entire driver as the state of the VMC
 * must be in tough shape.
 */
/* NOTE(review): 'staticvoid' lost its whitespace during extraction, and the
 * body that follows the declaration is NOT close_resp logic -- it is a
 * CRQ-draining loop spliced in from the adapter task/tasklet function (it
 * uses 'done' and 'vdev', which are not declared here). Restore both
 * functions from the upstream ibmvmc driver before use.
 */
staticvoid ibmvmc_process_close_resp(struct ibmvmc_crq_msg *crq, struct crq_server_adapter *adapter)
{ unsignedchar hmc_index;
while (!done) { /* Pull all the valid messages off the CRQ */ while ((crq = crq_queue_next_crq(&adapter->queue)) != NULL) {
ibmvmc_handle_crq(crq, adapter);
crq->valid = 0x00; /* CRQ reset was requested, stop processing CRQs. * Interrupts will be re-enabled by the reset task.
*/ if (ibmvmc.state == ibmvmc_state_sched_reset) return;
}
vio_enable_interrupts(vdev);
/* Re-check once after enabling interrupts to close the race window. */
crq = crq_queue_next_crq(&adapter->queue); if (crq) {
vio_disable_interrupts(vdev);
ibmvmc_handle_crq(crq, adapter);
crq->valid = 0x00; /* CRQ reset was requested, stop processing CRQs. * Interrupts will be re-enabled by the reset task.
*/ if (ibmvmc.state == ibmvmc_state_sched_reset) return;
} else {
done = 1;
}
}
}
/* NOTE(review): headerless fragment -- this is the error-unwind tail of a
 * CRQ queue initialization function (the allocation/registration code and
 * the function signature are missing from this extract). Labels unwind in
 * reverse order of acquisition: IRQ -> CRQ registration -> DMA map -> page.
 */
req_irq_failed: /* Cannot have any work since we either never got our IRQ registered, * or never got interrupts enabled
*/
tasklet_kill(&adapter->work_task);
h_free_crq(vdev->unit_address);
reg_crq_failed:
dma_unmap_single(adapter->dev,
queue->msg_token,
queue->size * sizeof(*queue->msgs), DMA_BIDIRECTIONAL);
map_failed:
/* NOTE(review): 'unsignedlong' lost its whitespace during extraction. */
free_page((unsignedlong)queue->msgs);
malloc_failed: return -ENOMEM;
}
/* Fill in the liobn and riobn fields on the adapter */
/* NOTE(review): 'staticint' lost its whitespace during extraction. */
staticint read_dma_window(struct vio_dev *vdev, struct crq_server_adapter *adapter)
{ const __be32 *dma_window; const __be32 *prop;
/* TODO Using of_parse_dma_window would be better, but it doesn't give * a way to read multiple windows without already knowing the size of * a window or the number of windows
*/
dma_window =
(const __be32 *)vio_get_attribute(vdev, "ibm,my-dma-window",
NULL); if (!dma_window) {
dev_warn(adapter->dev, "Couldn't find ibm,my-dma-window property\n"); return -1;
}
/* NOTE(review): function is truncated here -- the parsing of liobn/riobn
 * from the property cells and the success return are missing from this
 * extract. Restore from the upstream ibmvmc driver before use.
 */
/* Try to send an initialization message. Note that this is allowed * to fail if the other end is not acive. In that case we just wait * for the other side to initialize.
*/
/* NOTE(review): garbled fragment from the probe/queue-init path -- 'rc' is
 * tested below but never assigned here; upstream assigns
 * rc = ibmvmc_send_crq(...) and then checks (rc != 0 && rc != H_RESOURCE).
 * Restore from the upstream ibmvmc driver before use.
 */
if (ibmvmc_send_crq(adapter, 0xC001000000000000LL, 0) != 0 &&
rc != H_RESOURCE)
dev_warn(adapter->dev, "Failed to send initialize CRQ message\n");
/*
 * NOTE(review): the following trailing text is website-disclaimer residue
 * from whatever page this file was extracted from; it is not part of the
 * driver source. Translated from German for reference:
 * "The information on this website has been compiled carefully to the best
 * of our knowledge. However, neither completeness, nor correctness, nor
 * quality of the information provided is guaranteed.
 * Note: the colored syntax display and the measurement are still
 * experimental."
 */