switch (event) { case VIRTCHNL2_EVENT_LINK_CHANGE:
idpf_handle_event_link(adapter, v2e); return; default:
dev_err(&adapter->pdev->dev, "Unknown event %d from PF\n", event); break;
}
}
/** * idpf_mb_clean - Reclaim the send mailbox queue entries * @adapter: Driver specific private structure * * Reclaim the send mailbox queue entries to be used to send further messages * * Returns 0 on success, negative on failure
*/ staticint idpf_mb_clean(struct idpf_adapter *adapter)
{
u16 i, num_q_msg = IDPF_DFLT_MBX_Q_LEN; struct idpf_ctlq_msg **q_msg; struct idpf_dma_mem *dma_mem; int err;
err = idpf_ctlq_clean_sq(adapter->hw.asq, &num_q_msg, q_msg); if (err) goto err_kfree;
for (i = 0; i < num_q_msg; i++) { if (!q_msg[i]) continue;
dma_mem = q_msg[i]->ctx.indirect.payload; if (dma_mem)
dma_free_coherent(&adapter->pdev->dev, dma_mem->size,
dma_mem->va, dma_mem->pa);
kfree(q_msg[i]);
kfree(dma_mem);
}
err_kfree:
kfree(q_msg);
return err;
}
#if IS_ENABLED(CONFIG_PTP_1588_CLOCK)
/**
 * idpf_ptp_is_mb_msg - Check if the message is PTP-related
 * @op: virtchnl opcode
 *
 * Return: true if msg is PTP-related, false otherwise.
 */
static bool idpf_ptp_is_mb_msg(u32 op)
{
	switch (op) {
	case VIRTCHNL2_OP_PTP_GET_DEV_CLK_TIME:
	case VIRTCHNL2_OP_PTP_GET_CROSS_TIME:
	case VIRTCHNL2_OP_PTP_SET_DEV_CLK_TIME:
	case VIRTCHNL2_OP_PTP_ADJ_DEV_CLK_FINE:
	case VIRTCHNL2_OP_PTP_ADJ_DEV_CLK_TIME:
	case VIRTCHNL2_OP_PTP_GET_VPORT_TX_TSTAMP_CAPS:
	case VIRTCHNL2_OP_PTP_GET_VPORT_TX_TSTAMP:
		return true;
	default:
		return false;
	}
}
/**
 * idpf_prepare_ptp_mb_msg - Prepare PTP related message
 * @adapter: Driver specific private structure
 * @op: virtchnl opcode
 * @ctlq_msg: Corresponding control queue message
 */
static void idpf_prepare_ptp_mb_msg(struct idpf_adapter *adapter, u32 op,
				    struct idpf_ctlq_msg *ctlq_msg)
{
	/* If the message is PTP-related and the secondary mailbox is available,
	 * send the message through the secondary mailbox.
	 */
	if (!idpf_ptp_is_mb_msg(op) || !adapter->ptp->secondary_mbx.valid)
		return;

	/* NOTE(review): tail reconstructed from upstream idpf — route the
	 * message to the peer driver via the secondary mailbox; confirm field
	 * names against idpf_ptp.h.
	 */
	ctlq_msg->opcode = idpf_mbq_opc_send_msg_to_peer_drv;
	ctlq_msg->func_id = adapter->ptp->secondary_mbx.peer_mbx_q_id;
	ctlq_msg->host_id = adapter->ptp->secondary_mbx.peer_id;
}
/**
 * idpf_send_mb_msg - Send message over mailbox
 * @adapter: Driver specific private structure
 * @op: virtchnl opcode
 * @msg_size: size of the payload
 * @msg: pointer to buffer holding the payload
 * @cookie: unique SW generated cookie per message
 *
 * Will prepare the control queue message and initiates the send api
 *
 * Returns 0 on success, negative on failure
 */
int idpf_send_mb_msg(struct idpf_adapter *adapter, u32 op,
		     u16 msg_size, u8 *msg, u16 cookie)
{
	struct idpf_ctlq_msg *ctlq_msg;
	struct idpf_dma_mem *dma_mem;
	int err;

	/* If we are here and a reset is detected nothing much can be
	 * done. This thread should silently abort and expected to
	 * be corrected with a new run either by user or driver
	 * flows after reset
	 */
	if (idpf_is_reset_detected(adapter))
		return 0;

	err = idpf_mb_clean(adapter);
	if (err)
		return err;

	ctlq_msg = kzalloc(sizeof(*ctlq_msg), GFP_ATOMIC);
	if (!ctlq_msg)
		return -ENOMEM;

	dma_mem = kzalloc(sizeof(*dma_mem), GFP_ATOMIC);
	if (!dma_mem) {
		err = -ENOMEM;
		goto dma_mem_error;
	}

	/* Default route is the CP mailbox; PTP messages may be redirected to
	 * the secondary mailbox below.
	 */
	ctlq_msg->opcode = idpf_mbq_opc_send_msg_to_cp;
	ctlq_msg->func_id = 0;

	idpf_prepare_ptp_mb_msg(adapter, op, ctlq_msg);

	ctlq_msg->data_len = msg_size;
	ctlq_msg->cookie.mbx.chnl_opcode = op;
	ctlq_msg->cookie.mbx.chnl_retval = 0;
	dma_mem->size = IDPF_CTLQ_MAX_BUF_LEN;
	dma_mem->va = dma_alloc_coherent(&adapter->pdev->dev, dma_mem->size,
					 &dma_mem->pa, GFP_ATOMIC);
	if (!dma_mem->va) {
		err = -ENOMEM;
		goto dma_alloc_error;
	}

	/* It's possible we're just sending an opcode but no buffer */
	if (msg && msg_size)
		memcpy(dma_mem->va, msg, msg_size);
	ctlq_msg->ctx.indirect.payload = dma_mem;
	ctlq_msg->ctx.sw_cookie.data = cookie;

	err = idpf_ctlq_send(&adapter->hw, adapter->hw.asq, 1, ctlq_msg);
	if (err)
		goto send_error;

	return 0;

send_error:
	dma_free_coherent(&adapter->pdev->dev, dma_mem->size, dma_mem->va,
			  dma_mem->pa);
dma_alloc_error:
	kfree(dma_mem);
dma_mem_error:
	kfree(ctlq_msg);

	return err;
}
/* API for virtchnl "transaction" support ("xn" for short). * * We are reusing the completion lock to serialize the accesses to the * transaction state for simplicity, but it could be its own separate synchro * as well. For now, this API is only used from within a workqueue context; * raw_spin_lock() is enough.
*/ /** * idpf_vc_xn_lock - Request exclusive access to vc transaction * @xn: struct idpf_vc_xn* to access
*/ #define idpf_vc_xn_lock(xn) \
raw_spin_lock(&(xn)->completed.wait.lock)
/** * idpf_vc_xn_shutdown - Uninitialize virtchnl transaction object * @vcxn_mngr: pointer to vc transaction manager struct * * All waiting threads will be woken-up and their transaction aborted. Further * operations on that object will fail.
*/ void idpf_vc_xn_shutdown(struct idpf_vc_xn_manager *vcxn_mngr)
{ int i;
/** * idpf_vc_xn_pop_free - Pop a free transaction from free list * @vcxn_mngr: transaction manager to pop from * * Returns NULL if no free transactions
*/ static struct idpf_vc_xn *idpf_vc_xn_pop_free(struct idpf_vc_xn_manager *vcxn_mngr)
{ struct idpf_vc_xn *xn = NULL; unsignedlong free_idx;
/**
 * idpf_vc_xn_push_free - Push a free transaction to free list
 * @vcxn_mngr: transaction manager to push to
 * @xn: transaction to push
 */
static void idpf_vc_xn_push_free(struct idpf_vc_xn_manager *vcxn_mngr,
				 struct idpf_vc_xn *xn)
{
	idpf_vc_xn_release_bufs(xn);
	set_bit(xn->idx, vcxn_mngr->free_xn_bm);
}
/** * idpf_vc_xn_exec - Perform a send/recv virtchnl transaction * @adapter: driver specific private structure with vcxn_mngr * @params: parameters for this particular transaction including * -vc_op: virtchannel operation to send * -send_buf: kvec iov for send buf and len * -recv_buf: kvec iov for recv buf and len (ignored if NULL) * -timeout_ms: timeout waiting for a reply (milliseconds) * -async: don't wait for message reply, will lose caller context * -async_handler: callback to handle async replies * * @returns >= 0 for success, the size of the initial reply (may or may not be * >= @recv_buf.iov_len, but we never overflow @@recv_buf_iov_base). < 0 for * error.
*/
ssize_t idpf_vc_xn_exec(struct idpf_adapter *adapter, conststruct idpf_vc_xn_params *params)
{ conststruct kvec *send_buf = ¶ms->send_buf; struct idpf_vc_xn *xn;
ssize_t retval;
u16 cookie;
xn = idpf_vc_xn_pop_free(adapter->vcxn_mngr); /* no free transactions available */ if (!xn) return -ENOSPC;
idpf_vc_xn_lock(xn); if (xn->state == IDPF_VC_XN_SHUTDOWN) {
retval = -ENXIO; goto only_unlock;
} elseif (xn->state != IDPF_VC_XN_IDLE) { /* We're just going to clobber this transaction even though * it's not IDLE. If we don't reuse it we could theoretically * eventually leak all the free transactions and not be able to * send any messages. At least this way we make an attempt to * remain functional even though something really bad is * happening that's corrupting what was supposed to be free * transactions.
*/
WARN_ONCE(1, "There should only be idle transactions in free list (idx %d op %d)\n",
xn->idx, xn->vc_op);
}
/* No need to check the return value; we check the final state of the * transaction below. It's possible the transaction actually gets more * timeout than specified if we get preempted here but after * wait_for_completion_timeout returns. This should be non-issue * however.
*/
idpf_vc_xn_lock(xn); switch (xn->state) { case IDPF_VC_XN_SHUTDOWN:
retval = -ENXIO; goto only_unlock; case IDPF_VC_XN_WAITING:
dev_notice_ratelimited(&adapter->pdev->dev, "Transaction timed-out (op:%d cookie:%04x vc_op:%d salt:%02x timeout:%dms)\n",
params->vc_op, cookie, xn->vc_op,
xn->salt, params->timeout_ms);
retval = -ETIME; break; case IDPF_VC_XN_COMPLETED_SUCCESS:
retval = xn->reply_sz; break; case IDPF_VC_XN_COMPLETED_FAILED:
dev_notice_ratelimited(&adapter->pdev->dev, "Transaction failed (op %d)\n",
params->vc_op);
retval = -EIO; break; default: /* Invalid state. */
WARN_ON_ONCE(1);
retval = -EIO; break;
}
release_and_unlock:
idpf_vc_xn_push_free(adapter->vcxn_mngr, xn); /* If we receive a VC reply after here, it will be dropped. */
only_unlock:
idpf_vc_xn_unlock(xn);
return retval;
}
/** * idpf_vc_xn_forward_async - Handle async reply receives * @adapter: private data struct * @xn: transaction to handle * @ctlq_msg: corresponding ctlq_msg * * For async sends we're going to lose the caller's context so, if an * async_handler was provided, it can deal with the reply, otherwise we'll just * check and report if there is an error.
*/ staticint
idpf_vc_xn_forward_async(struct idpf_adapter *adapter, struct idpf_vc_xn *xn, conststruct idpf_ctlq_msg *ctlq_msg)
{ int err = 0;
if (ctlq_msg->cookie.mbx.chnl_opcode != xn->vc_op) {
dev_err_ratelimited(&adapter->pdev->dev, "Async message opcode does not match transaction opcode (msg: %d) (xn: %d)\n",
ctlq_msg->cookie.mbx.chnl_opcode, xn->vc_op);
xn->reply_sz = 0;
err = -EINVAL; goto release_bufs;
}
/** * idpf_vc_xn_forward_reply - copy a reply back to receiving thread * @adapter: driver specific private structure with vcxn_mngr * @ctlq_msg: controlq message to send back to receiving thread
*/ staticint
idpf_vc_xn_forward_reply(struct idpf_adapter *adapter, conststruct idpf_ctlq_msg *ctlq_msg)
{ constvoid *payload = NULL;
size_t payload_size = 0; struct idpf_vc_xn *xn;
u16 msg_info; int err = 0;
u16 xn_idx;
u16 salt;
msg_info = ctlq_msg->ctx.sw_cookie.data;
xn_idx = FIELD_GET(IDPF_VC_XN_IDX_M, msg_info); if (xn_idx >= ARRAY_SIZE(adapter->vcxn_mngr->ring)) {
dev_err_ratelimited(&adapter->pdev->dev, "Out of bounds cookie received: %02x\n",
xn_idx); return -EINVAL;
}
xn = &adapter->vcxn_mngr->ring[xn_idx];
idpf_vc_xn_lock(xn);
salt = FIELD_GET(IDPF_VC_XN_SALT_M, msg_info); if (xn->salt != salt) {
dev_err_ratelimited(&adapter->pdev->dev, "Transaction salt does not match (exp:%d@%02x(%d) != got:%d@%02x)\n",
xn->vc_op, xn->salt, xn->state,
ctlq_msg->cookie.mbx.chnl_opcode, salt);
idpf_vc_xn_unlock(xn); return -EINVAL;
}
switch (xn->state) { case IDPF_VC_XN_WAITING: /* success */ break; case IDPF_VC_XN_IDLE:
dev_err_ratelimited(&adapter->pdev->dev, "Unexpected or belated VC reply (op %d)\n",
ctlq_msg->cookie.mbx.chnl_opcode);
err = -EINVAL; goto out_unlock; case IDPF_VC_XN_SHUTDOWN: /* ENXIO is a bit special here as the recv msg loop uses that * know if it should stop trying to clean the ring if we lost * the virtchnl. We need to stop playing with registers and * yield.
*/
err = -ENXIO; goto out_unlock; case IDPF_VC_XN_ASYNC:
err = idpf_vc_xn_forward_async(adapter, xn, ctlq_msg);
idpf_vc_xn_unlock(xn); return err; default:
dev_err_ratelimited(&adapter->pdev->dev, "Overwriting VC reply (op %d)\n",
ctlq_msg->cookie.mbx.chnl_opcode);
err = -EBUSY; goto out_unlock;
}
if (ctlq_msg->cookie.mbx.chnl_opcode != xn->vc_op) {
dev_err_ratelimited(&adapter->pdev->dev, "Message opcode does not match transaction opcode (msg: %d) (xn: %d)\n",
ctlq_msg->cookie.mbx.chnl_opcode, xn->vc_op);
xn->reply_sz = 0;
xn->state = IDPF_VC_XN_COMPLETED_FAILED;
err = -EINVAL; goto out_unlock;
}
out_unlock:
idpf_vc_xn_unlock(xn); /* we _cannot_ hold lock while calling complete */
complete(&xn->completed);
return err;
}
/** * idpf_recv_mb_msg - Receive message over mailbox * @adapter: Driver specific private structure * * Will receive control queue message and posts the receive buffer. Returns 0 * on success and negative on failure.
*/ int idpf_recv_mb_msg(struct idpf_adapter *adapter)
{ struct idpf_ctlq_msg ctlq_msg; struct idpf_dma_mem *dma_mem; int post_err, err;
u16 num_recv;
while (1) { /* This will get <= num_recv messages and output how many * actually received on num_recv.
*/
num_recv = 1;
err = idpf_ctlq_recv(adapter->hw.arq, &num_recv, &ctlq_msg); if (err || !num_recv) break;
/* If post failed clear the only buffer we supplied */ if (post_err) { if (dma_mem)
dma_free_coherent(&adapter->pdev->dev,
dma_mem->size, dma_mem->va,
dma_mem->pa); break;
}
/* virtchnl trying to shutdown, stop cleaning */ if (err == -ENXIO) break;
}
return err;
}
/** * idpf_wait_for_marker_event - wait for software marker response * @vport: virtual port data structure * * Returns 0 success, negative on failure.
**/ staticint idpf_wait_for_marker_event(struct idpf_vport *vport)
{ int event; int i;
for (i = 0; i < vport->num_txq; i++)
idpf_queue_set(SW_MARKER, vport->txqs[i]);
reply_sz = idpf_vc_xn_exec(adapter, &xn_params); if (reply_sz < 0) return reply_sz; if (reply_sz < sizeof(vvi)) return -EIO;
major = le32_to_cpu(vvi.major);
minor = le32_to_cpu(vvi.minor);
if (major > IDPF_VIRTCHNL_VERSION_MAJOR) {
dev_warn(&adapter->pdev->dev, "Virtchnl major version greater than supported\n"); return -EINVAL;
}
if (major == IDPF_VIRTCHNL_VERSION_MAJOR &&
minor > IDPF_VIRTCHNL_VERSION_MINOR)
dev_warn(&adapter->pdev->dev, "Virtchnl minor version didn't match\n");
/* If we have a mismatch, resend version to update receiver on what * version we will use.
*/ if (!adapter->virt_ver_maj &&
major != IDPF_VIRTCHNL_VERSION_MAJOR &&
minor != IDPF_VIRTCHNL_VERSION_MINOR)
err = -EAGAIN;
for (int i = 0; i < num_regions; i++) {
hw->lan_regs[i].addr_len =
le64_to_cpu(rcvd_regions->mem_reg[i].size);
hw->lan_regs[i].addr_start =
le64_to_cpu(rcvd_regions->mem_reg[i].start_offset);
}
hw->num_lan_regs = num_regions;
return err;
}
/** * idpf_calc_remaining_mmio_regs - calculate MMIO regions outside mbx and rstat * @adapter: Driver specific private structure * * Called when idpf_send_get_lan_memory_regions is not supported. This will * calculate the offsets and sizes for the regions before, in between, and * after the mailbox and rstat MMIO mappings. * * Return: 0 on success or error code on failure.
*/ staticint idpf_calc_remaining_mmio_regs(struct idpf_adapter *adapter)
{ struct resource *rstat_reg = &adapter->dev_ops.static_reg_info[1]; struct resource *mbx_reg = &adapter->dev_ops.static_reg_info[0]; struct idpf_hw *hw = &adapter->hw;
/** * idpf_get_reg_intr_vecs - Get vector queue register offset * @vport: virtual port structure * @reg_vals: Register offsets to store in * * Returns number of registers that got populated
*/ int idpf_get_reg_intr_vecs(struct idpf_vport *vport, struct idpf_vec_regs *reg_vals)
{ struct virtchnl2_vector_chunks *chunks; struct idpf_vec_regs reg_val;
u16 num_vchunks, num_vec; int num_regs = 0, i, j;
/**
 * idpf_vport_get_q_reg - Get the queue registers for the vport
 * @reg_vals: register values needing to be set
 * @num_regs: amount we expect to fill
 * @q_type: queue model
 * @chunks: queue regs received over mailbox
 *
 * This function parses the queue register offsets from the queue register
 * chunk information, with a specific queue type and stores it into the array
 * passed as an argument. It returns the actual number of queue registers that
 * are filled.
 */
static int idpf_vport_get_q_reg(u32 *reg_vals, int num_regs, u32 q_type,
				struct virtchnl2_queue_reg_chunks *chunks)
{
	u16 num_chunks = le16_to_cpu(chunks->num_chunks);
	int reg_filled = 0, i;
	u32 reg_val;

	/* Walk the chunks back-to-front; only chunks of the requested type
	 * contribute registers.
	 */
	while (num_chunks--) {
		struct virtchnl2_queue_reg_chunk *chunk;
		u16 num_q;

		chunk = &chunks->chunks[num_chunks];
		if (le32_to_cpu(chunk->type) != q_type)
			continue;

		num_q = le32_to_cpu(chunk->num_queues);
		reg_val = le64_to_cpu(chunk->qtail_reg_start);
		for (i = 0; i < num_q && reg_filled < num_regs; i++) {
			reg_vals[reg_filled++] = reg_val;
			reg_val += le32_to_cpu(chunk->qtail_reg_spacing);
		}
	}

	return reg_filled;
}
/**
 * __idpf_queue_reg_init - initialize queue registers
 * @vport: virtual port structure
 * @reg_vals: registers we are initializing
 * @num_regs: how many registers there are in total
 * @q_type: queue model
 *
 * Return number of queues that are initialized
 */
static int __idpf_queue_reg_init(struct idpf_vport *vport, u32 *reg_vals,
				 int num_regs, u32 q_type)
{
	struct idpf_adapter *adapter = vport->adapter;
	int i, j, k = 0;

	switch (q_type) {
	case VIRTCHNL2_QUEUE_TYPE_TX:
		for (i = 0; i < vport->num_txq_grp; i++) {
			struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];

			for (j = 0; j < tx_qgrp->num_txq && k < num_regs;
			     j++, k++)
				tx_qgrp->txqs[j]->tail =
					idpf_get_reg_addr(adapter, reg_vals[k]);
		}
		break;
	case VIRTCHNL2_QUEUE_TYPE_RX:
		for (i = 0; i < vport->num_rxq_grp; i++) {
			struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
			u16 num_rxq = rx_qgrp->singleq.num_rxq;

			for (j = 0; j < num_rxq && k < num_regs; j++, k++) {
				struct idpf_rx_queue *q;

				q = rx_qgrp->singleq.rxqs[j];
				q->tail = idpf_get_reg_addr(adapter,
							    reg_vals[k]);
			}
		}
		break;
	case VIRTCHNL2_QUEUE_TYPE_RX_BUFFER:
		/* NOTE(review): this case's interior was truncated in the
		 * extract; reconstructed from the upstream idpf driver.
		 */
		for (i = 0; i < vport->num_rxq_grp; i++) {
			struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
			u8 num_bufqs = vport->num_bufqs_per_qgrp;

			for (j = 0; j < num_bufqs && k < num_regs; j++, k++) {
				struct idpf_buf_queue *q;

				q = &rx_qgrp->splitq.bufq_sets[j].bufq;
				q->tail = idpf_get_reg_addr(adapter,
							    reg_vals[k]);
			}
		}
		break;
	default:
		break;
	}

	return k;
}
/** * idpf_queue_reg_init - initialize queue registers * @vport: virtual port structure * * Return 0 on success, negative on failure
*/ int idpf_queue_reg_init(struct idpf_vport *vport)
{ struct virtchnl2_create_vport *vport_params; struct virtchnl2_queue_reg_chunks *chunks; struct idpf_vport_config *vport_config;
u16 vport_idx = vport->idx; int num_regs, ret = 0;
u32 *reg_vals;
/* We may never deal with more than 256 same type of queues */
reg_vals = kzalloc(sizeof(void *) * IDPF_LARGE_MAX_Q, GFP_KERNEL); if (!reg_vals) return -ENOMEM;
if (idpf_is_queue_model_split(vport->rxq_model)) { if (!(rx_desc_ids & VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M)) {
dev_info(&adapter->pdev->dev, "Minimum RX descriptor support not provided, using the default\n");
vport_msg->rx_desc_ids = cpu_to_le64(VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M);
}
} else { if (!(rx_desc_ids & VIRTCHNL2_RXDID_2_FLEX_SQ_NIC_M))
vport->base_rxd = true;
}
if (!idpf_is_queue_model_split(vport->txq_model)) return 0;
if ((tx_desc_ids & MIN_SUPPORT_TXDID) != MIN_SUPPORT_TXDID) {
dev_info(&adapter->pdev->dev, "Minimum TX descriptor support not provided, using the default\n");
vport_msg->tx_desc_ids = cpu_to_le64(MIN_SUPPORT_TXDID);
}
return 0;
}
/** * idpf_send_destroy_vport_msg - Send virtchnl destroy vport message * @vport: virtual port data structure * * Send virtchnl destroy vport message. Returns 0 on success, negative on * failure.
*/ int idpf_send_destroy_vport_msg(struct idpf_vport *vport)
{ struct idpf_vc_xn_params xn_params = {}; struct virtchnl2_vport v_id;
ssize_t reply_sz;
/* Populate the queue info buffer with all queue context info */ for (i = 0; i < vport->num_txq_grp; i++) { struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i]; int j, sched_mode;
/* Make sure accounting agrees */ if (k != totqs) return -EINVAL;
/* Chunk up the queue contexts into multiple messages to avoid * sending a control queue message buffer that is too large
*/
config_sz = sizeof(struct virtchnl2_config_tx_queues);
chunk_sz = sizeof(struct virtchnl2_txq_info);
/* Populate the queue info buffer with all queue context info */ for (i = 0; i < vport->num_rxq_grp; i++) { struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
u16 num_rxq; int j;
if (!idpf_is_queue_model_split(vport->rxq_model)) goto setup_rxqs;
/* In splitq mode, RXQ buffer size should be * set to that of the first buffer queue * associated with this RXQ.
*/
rxq->rx_buf_size = sets[0].bufq.rx_buf_size;
/* Make sure accounting agrees */ if (k != totqs) return -EINVAL;
/* Chunk up the queue contexts into multiple messages to avoid * sending a control queue message buffer that is too large
*/
config_sz = sizeof(struct virtchnl2_config_rx_queues);
chunk_sz = sizeof(struct virtchnl2_rxq_info);
send_msg: /* Chunk up the queue info into multiple messages */
config_sz = sizeof(struct virtchnl2_del_ena_dis_queues);
chunk_sz = sizeof(struct virtchnl2_queue_chunk);
if (idpf_is_queue_model_split(vport->txq_model)) { if (vport->num_rxq != k - vport->num_complq) return -EINVAL;
} else { if (vport->num_rxq != k - vport->num_txq) return -EINVAL;
}
/* Chunk up the vector info into multiple messages */
config_sz = sizeof(struct virtchnl2_queue_vector_maps);
chunk_sz = sizeof(struct virtchnl2_queue_vector);
/**
 * idpf_send_enable_queues_msg - send enable queues virtchnl message
 * @vport: Virtual port private data structure
 *
 * Thin wrapper that asks the ena/dis helper to enable every queue on the
 * vport.  Returns 0 on success, negative on failure.
 */
int idpf_send_enable_queues_msg(struct idpf_vport *vport)
{
	return idpf_send_ena_dis_queues_msg(vport, true);
}
/**
 * idpf_send_disable_queues_msg - send disable queues virtchnl message
 * @vport: Virtual port private data structure
 *
 * Will send disable queues virtchnl message.  Returns 0 on success, negative
 * on failure.
 */
int idpf_send_disable_queues_msg(struct idpf_vport *vport)
{
	int err, q_idx;

	err = idpf_send_ena_dis_queues_msg(vport, false);
	if (err)
		return err;

	/* switch to poll mode as interrupts will be disabled after disable
	 * queues virtchnl message is sent
	 */
	for (q_idx = 0; q_idx < vport->num_txq; q_idx++)
		idpf_queue_set(POLL_MODE, vport->txqs[q_idx]);

	/* schedule the napi to receive all the marker packets */
	local_bh_disable();
	for (q_idx = 0; q_idx < vport->num_q_vectors; q_idx++)
		napi_schedule(&vport->q_vectors[q_idx].napi);
	local_bh_enable();

	return idpf_wait_for_marker_event(vport);
}
/**
 * idpf_convert_reg_to_queue_chunks - Copy queue chunk information to the right
 * structure
 * @dchunks: Destination chunks to store data to
 * @schunks: Source chunks to copy data from
 * @num_chunks: number of chunks to copy
 */
static void idpf_convert_reg_to_queue_chunks(struct virtchnl2_queue_chunk *dchunks,
					     struct virtchnl2_queue_reg_chunk *schunks,
					     u16 num_chunks)
{
	u16 i;

	/* Fields are already little-endian on the wire; copy them verbatim. */
	for (i = 0; i < num_chunks; i++) {
		dchunks[i].type = schunks[i].type;
		dchunks[i].start_queue_id = schunks[i].start_queue_id;
		dchunks[i].num_queues = schunks[i].num_queues;
	}
}
/** * idpf_send_delete_queues_msg - send delete queues virtchnl message * @vport: Virtual port private data structure * * Will send delete queues virtchnl message. Return 0 on success, negative on * failure.
*/ int idpf_send_delete_queues_msg(struct idpf_vport *vport)
{ struct virtchnl2_del_ena_dis_queues *eq __free(kfree) = NULL; struct virtchnl2_create_vport *vport_params; struct virtchnl2_queue_reg_chunks *chunks; struct idpf_vc_xn_params xn_params = {}; struct idpf_vport_config *vport_config;
u16 vport_idx = vport->idx;
ssize_t reply_sz;
u16 num_chunks; int buf_size;
/**
 * idpf_send_config_queues_msg - Send config queues virtchnl message
 * @vport: Virtual port private data structure
 *
 * Configures the TX queues first, then the RX queues.  Returns 0 on success,
 * negative on failure.
 */
int idpf_send_config_queues_msg(struct idpf_vport *vport)
{
	int err = idpf_send_config_tx_queues_msg(vport);

	return err ? err : idpf_send_config_rx_queues_msg(vport);
}
/**
 * idpf_send_add_queues_msg - Send virtchnl add queues message
 * @vport: Virtual port private data structure
 * @num_tx_q: number of transmit queues
 * @num_complq: number of transmit completion queues
 * @num_rx_q: number of receive queues
 * @num_rx_bufq: number of receive buffer queues
 *
 * Returns 0 on success, negative on failure. vport _MUST_ be const here as
 * we should not change any fields within vport itself in this function.
 */
int idpf_send_add_queues_msg(const struct idpf_vport *vport, u16 num_tx_q,
			     u16 num_complq, u16 num_rx_q, u16 num_rx_bufq)
{
	struct virtchnl2_add_queues *vc_msg __free(kfree) = NULL;
	struct idpf_vc_xn_params xn_params = {};
	struct idpf_vport_config *vport_config;
	struct virtchnl2_add_queues aq = {};
	u16 vport_idx = vport->idx;
	ssize_t reply_sz;
	int size;

	vc_msg = kzalloc(IDPF_CTLQ_MAX_BUF_LEN, GFP_KERNEL);
	if (!vc_msg)
		return -ENOMEM;

	/* NOTE(review): remainder reconstructed from the upstream idpf
	 * driver — the extract was truncated after the allocation; verify
	 * against the in-tree idpf_virtchnl.c.
	 */
	vport_config = vport->adapter->vport_config[vport_idx];
	kfree(vport_config->req_qs_chunks);
	vport_config->req_qs_chunks = NULL;

	aq.vport_id = cpu_to_le32(vport->vport_id);
	aq.num_tx_q = cpu_to_le16(num_tx_q);
	aq.num_tx_complq = cpu_to_le16(num_complq);
	aq.num_rx_q = cpu_to_le16(num_rx_q);
	aq.num_rx_bufq = cpu_to_le16(num_rx_bufq);

	xn_params.vc_op = VIRTCHNL2_OP_ADD_QUEUES;
	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
	xn_params.send_buf.iov_base = &aq;
	xn_params.send_buf.iov_len = sizeof(aq);
	xn_params.recv_buf.iov_base = vc_msg;
	xn_params.recv_buf.iov_len = IDPF_CTLQ_MAX_BUF_LEN;
	reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
	if (reply_sz < 0)
		return reply_sz;

	/* compare vc_msg num queues with vport num queues */
	if (le16_to_cpu(vc_msg->num_tx_q) != num_tx_q ||
	    le16_to_cpu(vc_msg->num_rx_q) != num_rx_q ||
	    le16_to_cpu(vc_msg->num_tx_complq) != num_complq ||
	    le16_to_cpu(vc_msg->num_rx_bufq) != num_rx_bufq)
		return -EINVAL;

	size = struct_size(vc_msg, chunks.chunks,
			   le16_to_cpu(vc_msg->chunks.num_chunks));
	if (reply_sz < size)
		return -EIO;

	vport_config->req_qs_chunks = kmemdup(vc_msg, size, GFP_KERNEL);
	if (!vport_config->req_qs_chunks)
		return -ENOMEM;

	return 0;
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.