/* Detach the admin queue from the device and clear the driver-side
 * "ready" state bits. Older devices (revision < 0x1) are told via the
 * adminq PFN register; newer ones via the driver_status reset mask.
 */
void gve_adminq_release(struct gve_priv *priv)
{
	int check = 0;

	/* Tell the device the adminq is leaving */
	if (priv->pdev->revision < 0x1) {
		iowrite32be(0x0, &priv->reg_bar0->adminq_pfn);
		while (ioread32be(&priv->reg_bar0->adminq_pfn)) {
			/* If this is reached the device is unrecoverable and still
			 * holding memory. Continue looping to avoid memory corruption,
			 * but WARN so it is visible what is going on.
			 */
			if (check == GVE_MAX_ADMINQ_RELEASE_CHECK)
				WARN(1, "Unrecoverable platform error!");
			check++;
			msleep(GVE_ADMINQ_SLEEP_LEN);
		}
	} else {
		iowrite32be(GVE_DRIVER_STATUS_RESET_MASK,
			    &priv->reg_bar0->driver_status);
		while (!(ioread32be(&priv->reg_bar0->device_status)
			 & GVE_DEVICE_STATUS_DEVICE_IS_RESET)) {
			/* Same unrecoverable situation as above: keep polling,
			 * but make the stall visible.
			 */
			if (check == GVE_MAX_ADMINQ_RELEASE_CHECK)
				WARN(1, "Unrecoverable platform error!");
			check++;
			msleep(GVE_ADMINQ_SLEEP_LEN);
		}
	}
	gve_clear_device_rings_ok(priv);
	gve_clear_device_resources_ok(priv);
	gve_clear_admin_queue_ok(priv);
}
staticbool gve_adminq_wait_for_cmd(struct gve_priv *priv, u32 prod_cnt)
{ int i;
for (i = 0; i < GVE_MAX_ADMINQ_EVENT_COUNTER_CHECK; i++) { if (ioread32be(&priv->reg_bar0->adminq_event_counter)
== prod_cnt) returntrue;
msleep(GVE_ADMINQ_SLEEP_LEN);
}
returnfalse;
}
/* Translate a device adminq status code into a negative errno.
 * Returns 0 for GVE_ADMINQ_COMMAND_PASSED; any failure status also bumps
 * the adminq_cmd_fail counter and logs the raw status.
 */
static int gve_adminq_parse_err(struct gve_priv *priv, u32 status)
{
	if (status != GVE_ADMINQ_COMMAND_PASSED &&
	    status != GVE_ADMINQ_COMMAND_UNSET) {
		dev_err(&priv->pdev->dev, "AQ command failed with status %d\n", status);
		priv->adminq_cmd_fail++;
	}
	switch (status) {
	case GVE_ADMINQ_COMMAND_PASSED:
		return 0;
	case GVE_ADMINQ_COMMAND_UNSET:
		/* UNSET means the device never wrote a status back. */
		dev_err(&priv->pdev->dev, "parse_aq_err: err and status both unset, this should not be possible.\n");
		return -EINVAL;
	case GVE_ADMINQ_COMMAND_ERROR_ABORTED:
	case GVE_ADMINQ_COMMAND_ERROR_CANCELLED:
	case GVE_ADMINQ_COMMAND_ERROR_DATALOSS:
	case GVE_ADMINQ_COMMAND_ERROR_FAILED_PRECONDITION:
	case GVE_ADMINQ_COMMAND_ERROR_UNAVAILABLE:
		return -EAGAIN;
	case GVE_ADMINQ_COMMAND_ERROR_ALREADY_EXISTS:
	case GVE_ADMINQ_COMMAND_ERROR_INTERNAL_ERROR:
	case GVE_ADMINQ_COMMAND_ERROR_INVALID_ARGUMENT:
	case GVE_ADMINQ_COMMAND_ERROR_NOT_FOUND:
	case GVE_ADMINQ_COMMAND_ERROR_OUT_OF_RANGE:
	case GVE_ADMINQ_COMMAND_ERROR_UNKNOWN_ERROR:
		return -EINVAL;
	case GVE_ADMINQ_COMMAND_ERROR_DEADLINE_EXCEEDED:
		return -ETIME;
	case GVE_ADMINQ_COMMAND_ERROR_PERMISSION_DENIED:
	case GVE_ADMINQ_COMMAND_ERROR_UNAUTHENTICATED:
		return -EACCES;
	case GVE_ADMINQ_COMMAND_ERROR_RESOURCE_EXHAUSTED:
		return -ENOMEM;
	case GVE_ADMINQ_COMMAND_ERROR_UNIMPLEMENTED:
		return -EOPNOTSUPP;
	default:
		dev_err(&priv->pdev->dev, "parse_aq_err: unknown status code %d\n", status);
		return -EINVAL;
	}
}
/* Flushes all AQ commands currently queued and waits for them to complete.
 * If there are failures, it will return the first error.
 * Caller must hold adminq_lock.
 */
static int gve_adminq_kick_and_wait(struct gve_priv *priv)
{
	int tail, head;
	int i;

	lockdep_assert_held(&priv->adminq_lock);

	tail = ioread32be(&priv->reg_bar0->adminq_event_counter);
	head = priv->adminq_prod_cnt;

	gve_adminq_kick_cmd(priv, head);
	if (!gve_adminq_wait_for_cmd(priv, head)) {
		dev_err(&priv->pdev->dev, "AQ commands timed out, need to reset AQ\n");
		priv->adminq_timeouts++;
		return -ENOTRECOVERABLE;
	}

	/* Walk every command between the old tail and the new head and
	 * surface the first failure the device reported.
	 */
	for (i = tail; i < head; i++) {
		union gve_adminq_command *cmd;
		u32 status;
		int err;	/* int, not u32: parse_err returns negative errnos */

		cmd = &priv->adminq[i & priv->adminq_mask];
		status = be32_to_cpu(READ_ONCE(cmd->status));
		err = gve_adminq_parse_err(priv, status);
		if (err)
			// Return the first error if we failed.
			return err;
	}

	return 0;
}
staticint gve_adminq_issue_cmd(struct gve_priv *priv, union gve_adminq_command *cmd_orig)
{ union gve_adminq_command *cmd;
u32 opcode;
u32 tail;
// Check if next command will overflow the buffer. if (((priv->adminq_prod_cnt + 1) & priv->adminq_mask) ==
(tail & priv->adminq_mask)) { int err;
// Flush existing commands to make room.
err = gve_adminq_kick_and_wait(priv); if (err) return err;
// Retry.
tail = ioread32be(&priv->reg_bar0->adminq_event_counter); if (((priv->adminq_prod_cnt + 1) & priv->adminq_mask) ==
(tail & priv->adminq_mask)) { // This should never happen. We just flushed the // command queue so there should be enough space. return -ENOMEM;
}
}
switch (opcode) { case GVE_ADMINQ_DESCRIBE_DEVICE:
priv->adminq_describe_device_cnt++; break; case GVE_ADMINQ_CONFIGURE_DEVICE_RESOURCES:
priv->adminq_cfg_device_resources_cnt++; break; case GVE_ADMINQ_REGISTER_PAGE_LIST:
priv->adminq_register_page_list_cnt++; break; case GVE_ADMINQ_UNREGISTER_PAGE_LIST:
priv->adminq_unregister_page_list_cnt++; break; case GVE_ADMINQ_CREATE_TX_QUEUE:
priv->adminq_create_tx_queue_cnt++; break; case GVE_ADMINQ_CREATE_RX_QUEUE:
priv->adminq_create_rx_queue_cnt++; break; case GVE_ADMINQ_DESTROY_TX_QUEUE:
priv->adminq_destroy_tx_queue_cnt++; break; case GVE_ADMINQ_DESTROY_RX_QUEUE:
priv->adminq_destroy_rx_queue_cnt++; break; case GVE_ADMINQ_DECONFIGURE_DEVICE_RESOURCES:
priv->adminq_dcfg_device_resources_cnt++; break; case GVE_ADMINQ_SET_DRIVER_PARAMETER:
priv->adminq_set_driver_parameter_cnt++; break; case GVE_ADMINQ_REPORT_STATS:
priv->adminq_report_stats_cnt++; break; case GVE_ADMINQ_REPORT_LINK_SPEED:
priv->adminq_report_link_speed_cnt++; break; case GVE_ADMINQ_REPORT_NIC_TIMESTAMP:
priv->adminq_report_nic_timestamp_cnt++; break; case GVE_ADMINQ_GET_PTYPE_MAP:
priv->adminq_get_ptype_map_cnt++; break; case GVE_ADMINQ_VERIFY_DRIVER_COMPATIBILITY:
priv->adminq_verify_driver_compatibility_cnt++; break; case GVE_ADMINQ_QUERY_FLOW_RULES:
priv->adminq_query_flow_rules_cnt++; break; case GVE_ADMINQ_CONFIGURE_FLOW_RULE:
priv->adminq_cfg_flow_rule_cnt++; break; case GVE_ADMINQ_CONFIGURE_RSS:
priv->adminq_cfg_rss_cnt++; break; case GVE_ADMINQ_QUERY_RSS:
priv->adminq_query_rss_cnt++; break; default:
dev_err(&priv->pdev->dev, "unknown AQ command opcode %d\n", opcode); return -EINVAL;
}
return 0;
}
/* Issue a single command and synchronously wait for it to complete.
 * Serialized by adminq_lock. Returns 0 on success or a negative errno.
 *
 * NOTE(review): the original text of this function was truncated after the
 * issue step — the mutex taken below was never released and the doorbell was
 * never rung. The missing tail is restored following the driver's
 * established pattern; confirm against the canonical source.
 */
static int gve_adminq_execute_cmd(struct gve_priv *priv,
				  union gve_adminq_command *cmd_orig)
{
	u32 tail, head;
	int err;

	mutex_lock(&priv->adminq_lock);
	tail = ioread32be(&priv->reg_bar0->adminq_event_counter);
	head = priv->adminq_prod_cnt;
	/* The queue must be fully drained before issuing a lone command;
	 * a mismatch means an earlier batch is still in flight.
	 */
	if (tail != head) {
		err = -EINVAL;
		goto out;
	}

	err = gve_adminq_issue_cmd(priv, cmd_orig);
	if (err)
		goto out;

	/* Ring the doorbell and wait for completion. */
	err = gve_adminq_kick_and_wait(priv);

out:
	mutex_unlock(&priv->adminq_lock);
	return err;
}
/* The device specifies that the management vector can either be the first irq * or the last irq. ntfy_blk_msix_base_idx indicates the first irq assigned to * the ntfy blks. If it is 0 then the management vector is last, if it is 1 then * the management vector is first. * * gve arranges the msix vectors so that the management vector is last.
*/ #define GVE_NTFY_BLK_BASE_MSIX_IDX 0 int gve_adminq_configure_device_resources(struct gve_priv *priv,
dma_addr_t counter_array_bus_addr,
u32 num_counters,
dma_addr_t db_array_bus_addr,
u32 num_ntfy_blks)
{ union gve_adminq_command cmd;
/* Unlike gve_adminq_create_rx_queue, this actually rings the doorbell */ int gve_adminq_create_single_rx_queue(struct gve_priv *priv, u32 queue_index)
{ union gve_adminq_command cmd;
/* Unlike gve_adminq_destroy_rx_queue, this actually rings the doorbell */ int gve_adminq_destroy_single_rx_queue(struct gve_priv *priv, u32 queue_index)
{ union gve_adminq_command cmd;
/* Apply optional device features advertised in the device descriptor.
 * Each dev_op_* argument may be NULL when the device did not advertise the
 * corresponding option; features additionally gated by a bit in
 * @supported_features_mask are only enabled when that bit is set.
 */
static void gve_enable_supported_features(struct gve_priv *priv,
					  u32 supported_features_mask,
					  const struct gve_device_option_jumbo_frames
					  *dev_op_jumbo_frames,
					  const struct gve_device_option_dqo_qpl
					  *dev_op_dqo_qpl,
					  const struct gve_device_option_buffer_sizes
					  *dev_op_buffer_sizes,
					  const struct gve_device_option_flow_steering
					  *dev_op_flow_steering,
					  const struct gve_device_option_rss_config
					  *dev_op_rss_config,
					  const struct gve_device_option_nic_timestamp
					  *dev_op_nic_timestamp,
					  const struct gve_device_option_modify_ring
					  *dev_op_modify_ring)
{
	/* Before control reaches this point, the page-size-capped max MTU from
	 * the gve_device_descriptor field has already been stored in
	 * priv->dev->max_mtu. We overwrite it with the true max MTU below.
	 */
	if (dev_op_jumbo_frames &&
	    (supported_features_mask & GVE_SUP_JUMBO_FRAMES_MASK)) {
		dev_info(&priv->pdev->dev, "JUMBO FRAMES device option enabled.\n");
		priv->dev->max_mtu = be16_to_cpu(dev_op_jumbo_frames->max_mtu);
	}

	/* Override pages for qpl for DQO-QPL */
	if (dev_op_dqo_qpl) {
		priv->tx_pages_per_qpl =
			be16_to_cpu(dev_op_dqo_qpl->tx_pages_per_qpl);
		/* A zero from the device means "use the driver default". */
		if (priv->tx_pages_per_qpl == 0)
			priv->tx_pages_per_qpl = DQO_QPL_DEFAULT_TX_PAGES;
	}

	if (dev_op_buffer_sizes &&
	    (supported_features_mask & GVE_SUP_BUFFER_SIZES_MASK)) {
		priv->max_rx_buffer_size =
			be16_to_cpu(dev_op_buffer_sizes->packet_buffer_size);
		priv->header_buf_size =
			be16_to_cpu(dev_op_buffer_sizes->header_buffer_size);
		dev_info(&priv->pdev->dev,
			 "BUFFER SIZES device option enabled with max_rx_buffer_size of %u, header_buf_size of %u.\n",
			 priv->max_rx_buffer_size, priv->header_buf_size);
	}

	/* Read and store ring size ranges given by device */
	if (dev_op_modify_ring &&
	    (supported_features_mask & GVE_SUP_MODIFY_RING_MASK)) {
		priv->modify_ring_size_enabled = true;

		/* max ring size for DQO QPL should not be overwritten because of device limit */
		if (priv->queue_format != GVE_DQO_QPL_FORMAT) {
			priv->max_rx_desc_cnt = be16_to_cpu(dev_op_modify_ring->max_rx_ring_size);
			priv->max_tx_desc_cnt = be16_to_cpu(dev_op_modify_ring->max_tx_ring_size);
		}

		if (priv->default_min_ring_size) {
			/* If device hasn't provided minimums, use default minimums */
			priv->min_tx_desc_cnt = GVE_DEFAULT_MIN_TX_RING_SIZE;
			priv->min_rx_desc_cnt = GVE_DEFAULT_MIN_RX_RING_SIZE;
		} else {
			priv->min_rx_desc_cnt = be16_to_cpu(dev_op_modify_ring->min_rx_ring_size);
			priv->min_tx_desc_cnt = be16_to_cpu(dev_op_modify_ring->min_tx_ring_size);
		}
	}

	if (dev_op_flow_steering &&
	    (supported_features_mask & GVE_SUP_FLOW_STEERING_MASK)) {
		if (dev_op_flow_steering->max_flow_rules) {
			priv->max_flow_rules =
				be32_to_cpu(dev_op_flow_steering->max_flow_rules);
			priv->dev->hw_features |= NETIF_F_NTUPLE;
			dev_info(&priv->pdev->dev,
				 "FLOW STEERING device option enabled with max rule limit of %u.\n",
				 priv->max_flow_rules);
		}
	}

	if (dev_op_rss_config &&
	    (supported_features_mask & GVE_SUP_RSS_CONFIG_MASK)) {
		priv->rss_key_size =
			be16_to_cpu(dev_op_rss_config->hash_key_size);
		priv->rss_lut_size =
			be16_to_cpu(dev_op_rss_config->hash_lut_size);
		priv->cache_rss_config = false;
		dev_dbg(&priv->pdev->dev,
			"RSS device option enabled with key size of %u, lut size of %u.\n",
			priv->rss_key_size, priv->rss_lut_size);
	}

	if (dev_op_nic_timestamp &&
	    (supported_features_mask & GVE_SUP_NIC_TIMESTAMP_MASK))
		priv->nic_timestamp_supported = true;
}
/* If the GQI_RAW_ADDRESSING option is not enabled and the queue format * is not set to GqiRda, choose the queue format in a priority order: * DqoRda, DqoQpl, GqiRda, GqiQpl. Use GqiQpl as default.
*/ if (dev_op_dqo_rda) {
priv->queue_format = GVE_DQO_RDA_FORMAT;
dev_info(&priv->pdev->dev, "Driver is running with DQO RDA queue format.\n");
supported_features_mask =
be32_to_cpu(dev_op_dqo_rda->supported_features_mask);
} elseif (dev_op_dqo_qpl) {
priv->queue_format = GVE_DQO_QPL_FORMAT;
supported_features_mask =
be32_to_cpu(dev_op_dqo_qpl->supported_features_mask);
} elseif (dev_op_gqi_rda) {
priv->queue_format = GVE_GQI_RDA_FORMAT;
dev_info(&priv->pdev->dev, "Driver is running with GQI RDA queue format.\n");
supported_features_mask =
be32_to_cpu(dev_op_gqi_rda->supported_features_mask);
} elseif (priv->queue_format == GVE_GQI_RDA_FORMAT) {
dev_info(&priv->pdev->dev, "Driver is running with GQI RDA queue format.\n");
} else {
priv->queue_format = GVE_GQI_QPL_FORMAT; if (dev_op_gqi_qpl)
supported_features_mask =
be32_to_cpu(dev_op_gqi_qpl->supported_features_mask);
dev_info(&priv->pdev->dev, "Driver is running with GQI QPL queue format.\n");
}
/* set default descriptor counts */
gve_set_default_desc_cnt(priv, descriptor);
gve_set_default_rss_sizes(priv);
/* DQO supports LRO. */ if (!gve_is_gqi(priv))
priv->dev->hw_features |= NETIF_F_LRO;
int gve_adminq_report_link_speed(struct gve_priv *priv)
{ union gve_adminq_command gvnic_cmd;
dma_addr_t link_speed_region_bus;
__be64 *link_speed_region; int err;
/* Zero-valued fields in the cmd.configure_rss instruct the device to * not update those fields.
*/
memset(&cmd, 0, sizeof(cmd));
cmd.opcode = cpu_to_be32(GVE_ADMINQ_CONFIGURE_RSS);
cmd.configure_rss = (struct gve_adminq_configure_rss) {
.hash_types = cpu_to_be16(BIT(GVE_RSS_HASH_TCPV4) |
BIT(GVE_RSS_HASH_UDPV4) |
BIT(GVE_RSS_HASH_TCPV6) |
BIT(GVE_RSS_HASH_UDPV6)),
.hash_alg = hash_alg,
.hash_key_size =
cpu_to_be16((key_bus) ? priv->rss_key_size : 0),
.hash_lut_size =
cpu_to_be16((lut_bus) ? priv->rss_lut_size : 0),
.hash_key_addr = cpu_to_be64(key_bus),
.hash_lut_addr = cpu_to_be64(lut_bus),
};
err = gve_adminq_execute_cmd(priv, &cmd);
out: if (lut)
dma_free_coherent(&priv->pdev->dev,
priv->rss_lut_size * sizeof(*lut),
lut, lut_bus); if (key)
dma_free_coherent(&priv->pdev->dev,
priv->rss_key_size, key, key_bus); return err;
}
/* In the dma memory that the driver allocated for the device to query the flow rules, the device * will first write it with a struct of gve_query_flow_rules_descriptor. Next to it, the device * will write an array of rules or rule ids with the count that specified in the descriptor. * For GVE_FLOW_RULE_QUERY_STATS, the device will only write the descriptor.
*/ staticint gve_adminq_process_flow_rules_query(struct gve_priv *priv, u16 query_opcode, struct gve_query_flow_rules_descriptor *descriptor)
{ struct gve_flow_rules_cache *flow_rules_cache = &priv->flow_rules_cache;
u32 num_queried_rules, total_memory_len, rule_info_len; void *rule_info;
/* NOTE(review): the lines below are stray non-code text (German website
 * boilerplate) that was accidentally appended to this source file. It is
 * preserved here, translated, as a comment so the file remains valid C;
 * it should be removed entirely:
 * "The information on this website has been compiled carefully and to the
 * best of our knowledge. However, no guarantee is given as to the
 * completeness, correctness, or quality of the information provided.
 * Note: the colored syntax highlighting and the measurement are still
 * experimental."
 */