/* bnx2x_sp.c: Qlogic Everest network driver.
 *
 * Copyright 2011-2013 Broadcom Corporation
 * Copyright (c) 2014 QLogic Corporation
 * All rights reserved
 *
 * Unless you and Qlogic execute a separate written software license
 * agreement governing use of this software, this software is licensed to you
 * under the terms of the GNU General Public License version 2, available
 * at http://www.gnu.org/licenses/gpl-2.0.html (the "GPL").
 *
 * Notwithstanding the above, under no circumstances may you combine this
 * software in any way with any other Qlogic software provided under a
 * license other than the GPL, without Qlogic's express prior written
 * consent.
 *
 * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
 * Written by: Vladislav Zolotarov
 *
 */
/** * bnx2x_exe_queue_add - add a new element to the execution queue * * @bp: driver handle * @o: queue * @elem: new command to add * @restore: true - do not optimize the command * * If the element is optimized or is illegal, frees it.
*/ staticinlineint bnx2x_exe_queue_add(struct bnx2x *bp, struct bnx2x_exe_queue_obj *o, struct bnx2x_exeq_elem *elem, bool restore)
{ int rc;
spin_lock_bh(&o->lock);
if (!restore) { /* Try to cancel this element queue */
rc = o->optimize(bp, o->owner, elem); if (rc) goto free_and_exit;
/* Check if this request is ok */
rc = o->validate(bp, o->owner, elem); if (rc) {
DP(BNX2X_MSG_SP, "Preamble failed: %d\n", rc); goto free_and_exit;
}
}
/* If so, add it to the execution queue */
list_add_tail(&elem->link, &o->exe_queue);
/** * bnx2x_exe_queue_step - execute one execution chunk atomically * * @bp: driver handle * @o: queue * @ramrod_flags: flags * * (Should be called while holding the exe_queue->lock).
*/ staticinlineint bnx2x_exe_queue_step(struct bnx2x *bp, struct bnx2x_exe_queue_obj *o, unsignedlong *ramrod_flags)
{ struct bnx2x_exeq_elem *elem, spacer; int cur_len = 0, rc;
memset(&spacer, 0, sizeof(spacer));
/* Next step should not be performed until the current is finished, * unless a DRV_CLEAR_ONLY bit is set. In this case we just want to * properly clear object internals without sending any command to the FW * which also implies there won't be any completion to clear the * 'pending' list.
*/ if (!list_empty(&o->pending_comp)) { if (test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags)) {
DP(BNX2X_MSG_SP, "RAMROD_DRV_CLR_ONLY requested: resetting a pending_comp list\n");
__bnx2x_exe_queue_reset_pending(bp, o);
} else { return 1;
}
}
/* Run through the pending commands list and create a next * execution chunk.
*/ while (!list_empty(&o->exe_queue)) {
elem = list_first_entry(&o->exe_queue, struct bnx2x_exeq_elem,
link);
WARN_ON(!elem->cmd_len);
if (cur_len + elem->cmd_len <= o->exe_chunk_len) {
cur_len += elem->cmd_len; /* Prevent from both lists being empty when moving an * element. This will allow the call of * bnx2x_exe_queue_empty() without locking.
*/
list_add_tail(&spacer.link, &o->pending_comp);
mb();
list_move_tail(&elem->link, &o->pending_comp);
list_del(&spacer.link);
} else break;
}
/* Sanity check */ if (!cur_len) return 0;
rc = o->execute(bp, o->owner, &o->pending_comp, ramrod_flags); if (rc < 0) /* In case of an error return the commands back to the queue * and reset the pending_comp.
*/
list_splice_init(&o->pending_comp, &o->exe_queue); elseif (!rc) /* If zero is returned, means there are no outstanding pending * completions and we may dismiss the pending list.
*/
__bnx2x_exe_queue_reset_pending(bp, o);
/** * bnx2x_state_wait - wait until the given bit(state) is cleared * * @bp: device handle * @state: state which is to be cleared * @pstate: state buffer *
*/ staticinlineint bnx2x_state_wait(struct bnx2x *bp, int state, unsignedlong *pstate)
{ /* can take a while if any port is running */ int cnt = 5000;
if (CHIP_REV_IS_EMUL(bp))
cnt *= 20;
DP(BNX2X_MSG_SP, "waiting for state to become %d\n", state);
/** * __bnx2x_vlan_mac_h_exec_pending - execute step instead of a previous step * * @bp: device handle * @o: vlan_mac object * * details Should be called under execution queue lock; notice it might release * and reclaim it during its run.
*/ staticvoid __bnx2x_vlan_mac_h_exec_pending(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o)
{ int rc; unsignedlong ramrod_flags = o->saved_ramrod_flags;
/** * __bnx2x_vlan_mac_h_pend - Pend an execution step which couldn't run * * @bp: device handle * @o: vlan_mac object * @ramrod_flags: ramrod flags of missed execution * * Context: Should be called under execution queue lock.
*/ staticvoid __bnx2x_vlan_mac_h_pend(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o, unsignedlong ramrod_flags)
{
o->head_exe_request = true;
o->saved_ramrod_flags = ramrod_flags;
DP(BNX2X_MSG_SP, "Placing pending execution with ramrod flags %lu\n",
ramrod_flags);
}
/**
 * __bnx2x_vlan_mac_h_write_unlock - unlock the vlan mac head list writer lock
 *
 * @bp:	device handle
 * @o:	vlan_mac object
 *
 * Context: Should be called under execution queue lock. Notice if a pending
 *          execution exists, it would perform it - possibly releasing and
 *          reclaiming the execution queue lock.
 */
static void __bnx2x_vlan_mac_h_write_unlock(struct bnx2x *bp,
					    struct bnx2x_vlan_mac_obj *o)
{
	/* It's possible a new pending execution was added since this writer
	 * executed. If so, execute again. [Ad infinitum]
	 */
	while (o->head_exe_request) {
		DP(BNX2X_MSG_SP, "vlan_mac_lock - writer release encountered a pending request\n");
		__bnx2x_vlan_mac_h_exec_pending(bp, o);
	}
}
/**
 * __bnx2x_vlan_mac_h_read_lock - lock the vlan mac head list reader lock
 *
 * @bp:	device handle
 * @o:	vlan_mac object
 *
 * Context: Should be called under the execution queue lock. May sleep. May
 *          release and reclaim execution queue lock during its run.
 */
static int __bnx2x_vlan_mac_h_read_lock(struct bnx2x *bp,
					struct bnx2x_vlan_mac_obj *o)
{
	/* If we got here, we're holding lock --> no WRITER exists */
	o->head_reader++;
	DP(BNX2X_MSG_SP, "vlan_mac_lock - locked reader - number %d\n",
	   o->head_reader);

	return 0;
}
/** * bnx2x_vlan_mac_h_read_lock - lock the vlan mac head list reader lock * * @bp: device handle * @o: vlan_mac object * * Context: May sleep. Claims and releases execution queue lock during its run.
*/ int bnx2x_vlan_mac_h_read_lock(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o)
{ int rc;
/**
 * __bnx2x_vlan_mac_h_read_unlock - unlock the vlan mac head list reader lock
 *
 * @bp:	device handle
 * @o:	vlan_mac object
 *
 * Context: Should be called under execution queue lock. Notice if a pending
 *          execution exists, it would be performed if this was the last
 *          reader. possibly releasing and reclaiming the execution queue lock.
 */
static void __bnx2x_vlan_mac_h_read_unlock(struct bnx2x *bp,
					   struct bnx2x_vlan_mac_obj *o)
{
	if (!o->head_reader) {
		/* Unbalanced unlock - indicates a driver bug */
		BNX2X_ERR("Need to release vlan mac reader lock, but lock isn't taken\n");
#ifdef BNX2X_STOP_ON_ERROR
		bnx2x_panic();
#endif
	} else {
		o->head_reader--;
		DP(BNX2X_MSG_SP, "vlan_mac_lock - decreased readers to %d\n",
		   o->head_reader);
	}

	/* It's possible a new pending execution was added, and that this reader
	 * was last - if so we need to execute the command.
	 */
	if (!o->head_reader && o->head_exe_request) {
		DP(BNX2X_MSG_SP, "vlan_mac_lock - reader release encountered a pending request\n");

		/* Writer release will do the trick */
		__bnx2x_vlan_mac_h_write_unlock(bp, o);
	}
}
/**
 * bnx2x_vlan_mac_h_read_unlock - unlock the vlan mac head list reader lock
 *
 * @bp:	device handle
 * @o:	vlan_mac object
 *
 * Context: Notice if a pending execution exists, it would be performed if this
 *          was the last reader. Claims and releases the execution queue lock
 *          during its run.
 */
void bnx2x_vlan_mac_h_read_unlock(struct bnx2x *bp,
				  struct bnx2x_vlan_mac_obj *o)
{
	/* Locked wrapper around the internal reader-unlock helper */
	spin_lock_bh(&o->exe_queue.lock);
	__bnx2x_vlan_mac_h_read_unlock(bp, o);
	spin_unlock_bh(&o->exe_queue.lock);
}
staticint bnx2x_get_n_elements(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o, int n, u8 *base, u8 stride, u8 size)
{ struct bnx2x_vlan_mac_registry_elem *pos;
u8 *next = base; int counter = 0; int read_lock;
DP(BNX2X_MSG_SP, "get_n_elements - taking vlan_mac_lock (reader)\n");
read_lock = bnx2x_vlan_mac_h_read_lock(bp, o); if (read_lock != 0)
BNX2X_ERR("get_n_elements failed to get vlan mac reader lock; Access without lock\n");
/* traverse list */
list_for_each_entry(pos, &o->head, link) { if (counter < n) {
memcpy(next, &pos->u, size);
counter++;
DP(BNX2X_MSG_SP, "copied element number %d to address %p element was:\n",
counter, next);
next += stride + size;
}
}
/** * bnx2x_vlan_mac_set_cmd_hdr_e2 - set a header in a single classify ramrod * * @bp: device handle * @o: queue for which we want to configure this rule * @add: if true the command is an ADD command, DEL otherwise * @opcode: CLASSIFY_RULE_OPCODE_XXX * @hdr: pointer to a header to setup *
*/ staticinlinevoid bnx2x_vlan_mac_set_cmd_hdr_e2(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o, bool add, int opcode, struct eth_classify_cmd_header *hdr)
{ struct bnx2x_raw_obj *raw = &o->raw;
/**
 * bnx2x_vlan_mac_set_rdata_hdr_e2 - set the classify ramrod data header
 *
 * @cid:	connection id
 * @type:	BNX2X_FILTER_XXX_PENDING
 * @hdr:	pointer to header to setup
 * @rule_cnt:	number of rules in the ramrod data
 *
 * currently we always configure one rule and echo field to contain a CID and an
 * opcode type.
 */
static inline void bnx2x_vlan_mac_set_rdata_hdr_e2(u32 cid, int type,
					struct eth_classify_header *hdr,
					int rule_cnt)
{
	/* echo carries the SW CID in its low bits and the pending-command
	 * type above it, so the completion handler can demultiplex them.
	 */
	hdr->echo = cpu_to_le32((cid & BNX2X_SWCID_MASK) |
				(type << BNX2X_SWCID_SHIFT));
	hdr->rule_cnt = (u8)rule_cnt;
}
/* Set LLH CAM entry: currently only iSCSI and ETH macs are * relevant. In addition, current implementation is tuned for a * single ETH MAC. * * When multiple unicast ETH MACs PF configuration in switch * independent mode is required (NetQ, multiple netdev MACs, * etc.), consider better utilisation of 8 per function MAC * entries in the LLH register. There is also * NIG_REG_P[01]_LLH_FUNC_MEM2 registers that complete the * total number of CAM entries to 16. * * Currently we won't configure NIG for MACs other than a primary ETH * MAC and iSCSI L2 MAC. * * If this MAC is moving from one Queue to another, no need to change * NIG configuration.
*/ if (cmd != BNX2X_VLAN_MAC_MOVE) { if (test_bit(BNX2X_ISCSI_ETH_MAC, vlan_mac_flags))
bnx2x_set_mac_in_nig(bp, add, mac,
BNX2X_LLH_CAM_ISCSI_ETH_LINE); elseif (test_bit(BNX2X_ETH_MAC, vlan_mac_flags))
bnx2x_set_mac_in_nig(bp, add, mac,
BNX2X_LLH_CAM_ETH_LINE);
}
/* Reset the ramrod data buffer for the first rule */ if (rule_idx == 0)
memset(data, 0, sizeof(*data));
/* Setup a command header */
bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_MAC,
&rule_entry->mac.header);
DP(BNX2X_MSG_SP, "About to %s MAC %pM for Queue %d\n",
(add ? "add" : "delete"), mac, raw->cl_id);
/* Set a MAC itself */
bnx2x_set_fw_mac_addr(&rule_entry->mac.mac_msb,
&rule_entry->mac.mac_mid,
&rule_entry->mac.mac_lsb, mac);
rule_entry->mac.inner_mac =
cpu_to_le16(elem->cmd_data.vlan_mac.u.mac.is_inner_mac);
/* MOVE: Add a rule that will add this MAC to the target Queue */ if (cmd == BNX2X_VLAN_MAC_MOVE) {
rule_entry++;
rule_cnt++;
/* Setup ramrod data */
bnx2x_vlan_mac_set_cmd_hdr_e2(bp,
elem->cmd_data.vlan_mac.target_obj, true, CLASSIFY_RULE_OPCODE_MAC,
&rule_entry->mac.header);
/* Set a MAC itself */
bnx2x_set_fw_mac_addr(&rule_entry->mac.mac_msb,
&rule_entry->mac.mac_mid,
&rule_entry->mac.mac_lsb, mac);
rule_entry->mac.inner_mac =
cpu_to_le16(elem->cmd_data.vlan_mac.
u.mac.is_inner_mac);
}
/* Set the ramrod data header */ /* TODO: take this to the higher level in order to prevent multiple
writing */
bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
rule_cnt);
}
/**
 * bnx2x_vlan_mac_set_rdata_hdr_e1x - set a header in a single classify ramrod
 *
 * @bp:		device handle
 * @o:		queue
 * @type:	the type of echo
 * @cam_offset:	offset in cam memory
 * @hdr:	pointer to a header to setup
 *
 * E1/E1H
 *
 * NOTE(review): the body below does not match this prototype - it references
 * 'add', 'cfg_entry', 'opcode' and 'mac', none of which are parameters or
 * locals here, while parameters 'type', 'cam_offset', 'hdr' and local 'r' go
 * unused. This looks like the body of a cfg-entry helper spliced under the
 * wrong header during extraction; the fused 'staticinlinevoid' keyword also
 * will not compile. TODO: confirm against the upstream driver source.
 */ staticinlinevoid bnx2x_vlan_mac_set_rdata_hdr_e1x(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o, int type, int cam_offset, struct mac_configuration_hdr *hdr)
{ struct bnx2x_raw_obj *r = &o->raw;
/* ADD path: mark the entry SET, record the filtering mode and the MAC */
if (add) {
SET_FLAG(cfg_entry->flags, MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
T_ETH_MAC_COMMAND_SET);
SET_FLAG(cfg_entry->flags,
MAC_CONFIGURATION_ENTRY_VLAN_FILTERING_MODE, opcode);
/* Set a MAC in a ramrod data */
bnx2x_set_fw_mac_addr(&cfg_entry->msb_mac_addr,
&cfg_entry->middle_mac_addr,
&cfg_entry->lsb_mac_addr, mac);
} else
/* DEL path: invalidate the CAM entry */
SET_FLAG(cfg_entry->flags, MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
T_ETH_MAC_COMMAND_INVALIDATE);
}
/* Set a VLAN itself */
rule_entry->vlan.vlan = cpu_to_le16(vlan);
/* MOVE: Add a rule that will add this MAC to the target Queue */ if (cmd == BNX2X_VLAN_MAC_MOVE) {
rule_entry++;
rule_cnt++;
/* Setup ramrod data */
bnx2x_vlan_mac_set_cmd_hdr_e2(bp,
elem->cmd_data.vlan_mac.target_obj, true, CLASSIFY_RULE_OPCODE_VLAN,
&rule_entry->vlan.header);
/* Set a VLAN itself */
rule_entry->vlan.vlan = cpu_to_le16(vlan);
}
/* Set the ramrod data header */ /* TODO: take this to the higher level in order to prevent multiple
writing */
bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
rule_cnt);
}
/* Reset the ramrod data buffer for the first rule */ if (rule_idx == 0)
memset(data, 0, sizeof(*data));
/* Set a rule header */
bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_PAIR,
&rule_entry->pair.header);
/* Set VLAN and MAC themselves */
rule_entry->pair.vlan = cpu_to_le16(vlan);
bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb,
&rule_entry->pair.mac_mid,
&rule_entry->pair.mac_lsb, mac);
inner_mac = elem->cmd_data.vlan_mac.u.vlan_mac.is_inner_mac;
rule_entry->pair.inner_mac = cpu_to_le16(inner_mac); /* MOVE: Add a rule that will add this MAC/VLAN to the target Queue */ if (cmd == BNX2X_VLAN_MAC_MOVE) { struct bnx2x_vlan_mac_obj *target_obj;
/* Set a VLAN itself */
rule_entry->pair.vlan = cpu_to_le16(vlan);
bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb,
&rule_entry->pair.mac_mid,
&rule_entry->pair.mac_lsb, mac);
rule_entry->pair.inner_mac = cpu_to_le16(inner_mac);
}
/* Set the ramrod data header */
bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
rule_cnt);
}
/**
 * bnx2x_set_one_vlan_mac_e1h -
 *
 * @bp:		device handle
 * @o:		bnx2x_vlan_mac_obj
 * @elem:	bnx2x_exeq_elem
 * @rule_idx:	rule_idx
 * @cam_offset:	cam_offset
 */
static void bnx2x_set_one_vlan_mac_e1h(struct bnx2x *bp,
				       struct bnx2x_vlan_mac_obj *o,
				       struct bnx2x_exeq_elem *elem,
				       int rule_idx, int cam_offset)
{
	struct bnx2x_raw_obj *raw = &o->raw;
	struct mac_configuration_cmd *config =
		(struct mac_configuration_cmd *)(raw->rdata);
	/* 57710 and 57711 do not support MOVE command,
	 * so it's either ADD or DEL
	 */
	bool add = (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD);

	/* Reset the ramrod data buffer */
	memset(config, 0, sizeof(*config));

	bnx2x_vlan_mac_set_rdata_e1x(bp, o, BNX2X_FILTER_VLAN_MAC_PENDING,
				     cam_offset, add,
				     elem->cmd_data.vlan_mac.u.vlan_mac.mac,
				     elem->cmd_data.vlan_mac.u.vlan_mac.vlan,
				     ETH_VLAN_FILTER_CLASSIFY, config);
}
/**
 * bnx2x_vlan_mac_restore - reconfigure next MAC/VLAN/VLAN-MAC element
 *
 * @bp:	  device handle
 * @p:	  command parameters
 * @ppos: pointer to the cookie
 *
 * reconfigure next MAC/VLAN/VLAN-MAC element from the
 * previously configured elements list.
 *
 * from command parameters only RAMROD_COMP_WAIT bit in ramrod_flags is taken
 * into an account
 *
 * pointer to the cookie - that should be given back in the next call to make
 * function handle the next element. If *ppos is set to NULL it will restart the
 * iterator. If returned *ppos == NULL this means that the last element has been
 * handled.
 */
static int bnx2x_vlan_mac_restore(struct bnx2x *bp,
				  struct bnx2x_vlan_mac_ramrod_params *p,
				  struct bnx2x_vlan_mac_registry_elem **ppos)
{
	struct bnx2x_vlan_mac_registry_elem *pos;
	struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj;

	/* If list is empty - there is nothing to do here */
	if (list_empty(&o->head)) {
		*ppos = NULL;
		return 0;
	}

	/* make a step... */
	if (*ppos == NULL)
		*ppos = list_first_entry(&o->head,
					 struct bnx2x_vlan_mac_registry_elem,
					 link);
	else
		*ppos = list_next_entry(*ppos, link);

	pos = *ppos;

	/* If it's the last step - return NULL */
	if (list_is_last(&pos->link, &o->head))
		*ppos = NULL;

	/* Prepare a 'user_req' */
	memcpy(&p->user_req.u, &pos->u, sizeof(pos->u));

	/* Set the command */
	p->user_req.cmd = BNX2X_VLAN_MAC_ADD;

	/* Set vlan_mac_flags */
	p->user_req.vlan_mac_flags = pos->vlan_mac_flags;

	/* Set a restore bit */
	__set_bit(RAMROD_RESTORE, &p->ramrod_flags);

	return bnx2x_config_vlan_mac(bp, p);
}
/* bnx2x_exeq_get_mac/bnx2x_exeq_get_vlan/bnx2x_exeq_get_vlan_mac return a * pointer to an element with a specific criteria and NULL if such an element * hasn't been found.
*/ staticstruct bnx2x_exeq_elem *bnx2x_exeq_get_mac( struct bnx2x_exe_queue_obj *o, struct bnx2x_exeq_elem *elem)
{ struct bnx2x_exeq_elem *pos; struct bnx2x_mac_ramrod_data *data = &elem->cmd_data.vlan_mac.u.mac;
/* Check pending for execution commands */
list_for_each_entry(pos, &o->exe_queue, link) if (!memcmp(&pos->cmd_data.vlan_mac.u.mac, data, sizeof(*data)) &&
(pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd)) return pos;
/* Check pending for execution commands */
list_for_each_entry(pos, &o->exe_queue, link) if (!memcmp(&pos->cmd_data.vlan_mac.u.vlan_mac, data, sizeof(*data)) &&
(pos->cmd_data.vlan_mac.cmd ==
elem->cmd_data.vlan_mac.cmd)) return pos;
return NULL;
}
/**
 * bnx2x_validate_vlan_mac_add - check if an ADD command can be executed
 *
 * @bp:		device handle
 * @qo:		bnx2x_qable_obj
 * @elem:	bnx2x_exeq_elem
 *
 * Checks that the requested configuration can be added. If yes and if
 * requested, consume CAM credit.
 *
 * The 'validate' is run after the 'optimize'.
 */
static inline int bnx2x_validate_vlan_mac_add(struct bnx2x *bp,
					      union bnx2x_qable_obj *qo,
					      struct bnx2x_exeq_elem *elem)
{
	struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac;
	struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
	int rc;

	/* Check the registry */
	rc = o->check_add(bp, o, &elem->cmd_data.vlan_mac.u);
	if (rc) {
		DP(BNX2X_MSG_SP,
		   "ADD command is not allowed considering current registry state.\n");
		return rc;
	}

	/* Check if there is a pending ADD command for this
	 * MAC/VLAN/VLAN-MAC. Return an error if there is.
	 */
	if (exeq->get(exeq, elem)) {
		DP(BNX2X_MSG_SP, "There is a pending ADD command already\n");
		return -EEXIST;
	}

	/* TODO: Check the pending MOVE from other objects where this
	 * object is a destination object.
	 */

	/* Consume the credit if not requested not to */
	if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
		       &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
	      o->get_credit(o)))
		return -EINVAL;

	return 0;
}
/**
 * bnx2x_validate_vlan_mac_del - check if the DEL command can be executed
 *
 * @bp:		device handle
 * @qo:		quable object to check
 * @elem:	element that needs to be deleted
 *
 * Checks that the requested configuration can be deleted. If yes and if
 * requested, returns a CAM credit.
 *
 * The 'validate' is run after the 'optimize'.
 */
static inline int bnx2x_validate_vlan_mac_del(struct bnx2x *bp,
					      union bnx2x_qable_obj *qo,
					      struct bnx2x_exeq_elem *elem)
{
	struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac;
	struct bnx2x_vlan_mac_registry_elem *pos;
	struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
	struct bnx2x_exeq_elem query_elem;

	/* If this classification can not be deleted (doesn't exist)
	 * - return a BNX2X_EXIST.
	 */
	pos = o->check_del(bp, o, &elem->cmd_data.vlan_mac.u);
	if (!pos) {
		DP(BNX2X_MSG_SP,
		   "DEL command is not allowed considering current registry state\n");
		return -EEXIST;
	}

	/* Check if there are pending DEL or MOVE commands for this
	 * MAC/VLAN/VLAN-MAC. Return an error if so.
	 */
	memcpy(&query_elem, elem, sizeof(query_elem));

	/* Check for MOVE commands */
	query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_MOVE;
	if (exeq->get(exeq, &query_elem)) {
		BNX2X_ERR("There is a pending MOVE command already\n");
		return -EINVAL;
	}

	/* Check for DEL commands */
	if (exeq->get(exeq, elem)) {
		DP(BNX2X_MSG_SP, "There is a pending DEL command already\n");
		return -EEXIST;
	}

	/* Return the credit to the credit pool if not requested not to */
	if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
		       &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
	      o->put_credit(o))) {
		BNX2X_ERR("Failed to return a credit\n");
		return -EINVAL;
	}

	return 0;
}
/** * bnx2x_validate_vlan_mac_move - check if the MOVE command can be executed * * @bp: device handle * @qo: quable object to check (source) * @elem: element that needs to be moved * * Checks that the requested configuration can be moved. If yes and if * requested, returns a CAM credit. * * The 'validate' is run after the 'optimize'.
*/ staticinlineint bnx2x_validate_vlan_mac_move(struct bnx2x *bp, union bnx2x_qable_obj *qo, struct bnx2x_exeq_elem *elem)
{ struct bnx2x_vlan_mac_obj *src_o = &qo->vlan_mac; struct bnx2x_vlan_mac_obj *dest_o = elem->cmd_data.vlan_mac.target_obj; struct bnx2x_exeq_elem query_elem; struct bnx2x_exe_queue_obj *src_exeq = &src_o->exe_queue; struct bnx2x_exe_queue_obj *dest_exeq = &dest_o->exe_queue;
/* Check if we can perform this operation based on the current registry * state.
*/ if (!src_o->check_move(bp, src_o, dest_o,
&elem->cmd_data.vlan_mac.u)) {
DP(BNX2X_MSG_SP, "MOVE command is not allowed considering current registry state\n"); return -EINVAL;
}
/* Check if there is an already pending DEL or MOVE command for the * source object or ADD command for a destination object. Return an * error if so.
*/
memcpy(&query_elem, elem, sizeof(query_elem));
/* Check DEL on source */
query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_DEL; if (src_exeq->get(src_exeq, &query_elem)) {
BNX2X_ERR("There is a pending DEL command on the source queue already\n"); return -EINVAL;
}
/* Check MOVE on source */ if (src_exeq->get(src_exeq, elem)) {
DP(BNX2X_MSG_SP, "There is a pending MOVE command already\n"); return -EEXIST;
}
/* Check ADD on destination */
query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_ADD; if (dest_exeq->get(dest_exeq, &query_elem)) {
BNX2X_ERR("There is a pending ADD command on the destination queue already\n"); return -EINVAL;
}
/* Consume the credit if not requested not to */ if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT_DEST,
&elem->cmd_data.vlan_mac.vlan_mac_flags) ||
dest_o->get_credit(dest_o))) return -EINVAL;
if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
&elem->cmd_data.vlan_mac.vlan_mac_flags) ||
src_o->put_credit(src_o))) { /* return the credit taken from dest... */
dest_o->put_credit(dest_o); return -EINVAL;
}
return 0;
}
/* Dispatch the 'validate' step to the command-specific checker. */
static int bnx2x_validate_vlan_mac(struct bnx2x *bp,
				   union bnx2x_qable_obj *qo,
				   struct bnx2x_exeq_elem *elem)
{
	switch (elem->cmd_data.vlan_mac.cmd) {
	case BNX2X_VLAN_MAC_ADD:
		return bnx2x_validate_vlan_mac_add(bp, qo, elem);
	case BNX2X_VLAN_MAC_DEL:
		return bnx2x_validate_vlan_mac_del(bp, qo, elem);
	case BNX2X_VLAN_MAC_MOVE:
		return bnx2x_validate_vlan_mac_move(bp, qo, elem);
	default:
		return -EINVAL;
	}
}
/* Undo the CAM-credit side effect of a validated command: give back the
 * credit an ADD/MOVE consumed, or re-take the credit a DEL returned.
 */
static int bnx2x_remove_vlan_mac(struct bnx2x *bp,
				 union bnx2x_qable_obj *qo,
				 struct bnx2x_exeq_elem *elem)
{
	int rc = 0;

	/* If consumption wasn't required, nothing to do */
	if (test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
		     &elem->cmd_data.vlan_mac.vlan_mac_flags))
		return 0;

	switch (elem->cmd_data.vlan_mac.cmd) {
	case BNX2X_VLAN_MAC_ADD:
	case BNX2X_VLAN_MAC_MOVE:
		rc = qo->vlan_mac.put_credit(&qo->vlan_mac);
		break;
	case BNX2X_VLAN_MAC_DEL:
		rc = qo->vlan_mac.get_credit(&qo->vlan_mac);
		break;
	default:
		return -EINVAL;
	}

	/* The credit callbacks report success as a boolean true */
	if (!rc)
		return -EINVAL;

	return 0;
}
/**
 * bnx2x_wait_vlan_mac - passively wait for 5 seconds until all work completes.
 *
 * @bp:	device handle
 * @o:	bnx2x_vlan_mac_obj
 *
 * NOTE(review): the tail of this function (from the write_trylock call on)
 * references a 'ramrod_flags' variable that is not declared in this scope and
 * looks like the middle of __bnx2x_vlan_mac_execute_step spliced in during
 * extraction; the fused 'staticint' keyword also will not compile.
 * TODO: restore the original body from the upstream driver source.
 */ staticint bnx2x_wait_vlan_mac(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o)
{ int cnt = 5000, rc; struct bnx2x_exe_queue_obj *exeq = &o->exe_queue; struct bnx2x_raw_obj *raw = &o->raw;
	while (cnt--) { /* Wait for the current command to complete */
		rc = raw->wait_comp(bp, raw); if (rc) return rc;
		/* Wait until there are no pending commands */ if (!bnx2x_exe_queue_empty(exeq))
			usleep_range(1000, 2000); else return 0;
	}
	/* NOTE(review): code below appears spliced from a different function */
	DP(BNX2X_MSG_SP, "vlan_mac_execute_step - trying to take writer lock\n");
	rc = __bnx2x_vlan_mac_h_write_trylock(bp, o);

	if (rc != 0) {
		/* 'ramrod_flags' is not in scope here - see NOTE above */
		__bnx2x_vlan_mac_h_pend(bp, o, *ramrod_flags);

		/* Calling function should not differentiate between this case
		 * and the case in which there is already a pending ramrod
		 */
		rc = 1;
	} else {
		rc = bnx2x_exe_queue_step(bp, &o->exe_queue, ramrod_flags);
	}
	spin_unlock_bh(&o->exe_queue.lock);

	return rc;
}
/** * bnx2x_complete_vlan_mac - complete one VLAN-MAC ramrod * * @bp: device handle * @o: bnx2x_vlan_mac_obj * @cqe: completion element * @ramrod_flags: if set schedule next execution chunk *
*/ staticint bnx2x_complete_vlan_mac(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o, union event_ring_elem *cqe, unsignedlong *ramrod_flags)
{ struct bnx2x_raw_obj *r = &o->raw; int rc;
/* Clearing the pending list & raw state should be made * atomically (as execution flow assumes they represent the same).
*/
spin_lock_bh(&o->exe_queue.lock);
/* Reset pending list */
__bnx2x_exe_queue_reset_pending(bp, &o->exe_queue);
/* Clear pending */
r->clear_pending(r);
spin_unlock_bh(&o->exe_queue.lock);
/* If ramrod failed this is most likely a SW bug */ if (cqe->message.error) return -EINVAL;
/* Run the next bulk of pending commands if requested */ if (test_bit(RAMROD_CONT, ramrod_flags)) {
rc = __bnx2x_vlan_mac_execute_step(bp, o, ramrod_flags);
if (rc < 0) return rc;
}
/* If there is more work to do return PENDING */ if (!bnx2x_exe_queue_empty(&o->exe_queue)) return 1;
switch (elem->cmd_data.vlan_mac.cmd) { case BNX2X_VLAN_MAC_ADD:
query.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_DEL; break; case BNX2X_VLAN_MAC_DEL:
query.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_ADD; break; default: /* Don't handle anything other than ADD or DEL */ return 0;
}
/* If we found the appropriate element - delete it */
pos = exeq->get(exeq, &query); if (pos) {
/* Return the credit of the optimized command */ if (!test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
&pos->cmd_data.vlan_mac.vlan_mac_flags)) { if ((query.cmd_data.vlan_mac.cmd ==
BNX2X_VLAN_MAC_ADD) && !o->put_credit(o)) {
BNX2X_ERR("Failed to return the credit for the optimized ADD command\n"); return -EINVAL;
} elseif (!o->get_credit(o)) { /* VLAN_MAC_DEL */
BNX2X_ERR("Failed to recover the credit from the optimized DEL command\n"); return -EINVAL;
}
}
/** * bnx2x_vlan_mac_get_registry_elem - prepare a registry element * * @bp: device handle * @o: vlan object * @elem: element * @restore: to restore or not * @re: registry * * prepare a registry element according to the current command request.
*/ staticinlineint bnx2x_vlan_mac_get_registry_elem( struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o, struct bnx2x_exeq_elem *elem, bool restore, struct bnx2x_vlan_mac_registry_elem **re)
{ enum bnx2x_vlan_mac_cmd cmd = elem->cmd_data.vlan_mac.cmd; struct bnx2x_vlan_mac_registry_elem *reg_elem;
/* Allocate a new registry element if needed. */ if (!restore &&
((cmd == BNX2X_VLAN_MAC_ADD) || (cmd == BNX2X_VLAN_MAC_MOVE))) {
reg_elem = kzalloc(sizeof(*reg_elem), GFP_ATOMIC); if (!reg_elem) return -ENOMEM;
/* Get a new CAM offset */ if (!o->get_cam_offset(o, ®_elem->cam_offset)) { /* This shall never happen, because we have checked the * CAM availability in the 'validate'.
*/
WARN_ON(1);
kfree(reg_elem); return -EINVAL;
}
/* Set a VLAN-MAC data */
memcpy(®_elem->u, &elem->cmd_data.vlan_mac.u, sizeof(reg_elem->u));
/* Copy the flags (needed for DEL and RESTORE flows) */
reg_elem->vlan_mac_flags =
elem->cmd_data.vlan_mac.vlan_mac_flags;
} else/* DEL, RESTORE */
reg_elem = o->check_del(bp, o, &elem->cmd_data.vlan_mac.u);
/* If DRIVER_ONLY execution is requested, cleanup a registry * and exit. Otherwise send a ramrod to FW.
*/ if (!drv_only) {
WARN_ON(r->check_pending(r));
/* Set pending */
r->set_pending(r);
/* Fill the ramrod data */
list_for_each_entry(elem, exe_chunk, link) {
cmd = elem->cmd_data.vlan_mac.cmd; /* We will add to the target object in MOVE command, so * change the object for a CAM search.
*/ if (cmd == BNX2X_VLAN_MAC_MOVE)
cam_obj = elem->cmd_data.vlan_mac.target_obj; else
cam_obj = o;
/* Push a new entry into the registry */ if (!restore &&
((cmd == BNX2X_VLAN_MAC_ADD) ||
(cmd == BNX2X_VLAN_MAC_MOVE)))
list_add(®_elem->link, &cam_obj->head);
/* Configure a single command in a ramrod data buffer */
o->set_one_rule(bp, o, elem, idx,
reg_elem->cam_offset);
/* MOVE command consumes 2 entries in the ramrod data */ if (cmd == BNX2X_VLAN_MAC_MOVE)
idx += 2; else
idx++;
}
/* No need for an explicit memory barrier here as long we would * need to ensure the ordering of writing to the SPQ element * and updating of the SPQ producer which involves a memory * read and we will have to put a full memory barrier there * (inside bnx2x_sp_post()).
*/
/* Now, when we are done with the ramrod - clean up the registry */
list_for_each_entry(elem, exe_chunk, link) {
cmd = elem->cmd_data.vlan_mac.cmd; if ((cmd == BNX2X_VLAN_MAC_DEL) ||
(cmd == BNX2X_VLAN_MAC_MOVE)) {
reg_elem = o->check_del(bp, o,
&elem->cmd_data.vlan_mac.u);
/* * Add new elements to the execution list for commands that require it.
*/ if (!cont) {
rc = bnx2x_vlan_mac_push_new_cmd(bp, p); if (rc) return rc;
}
/* If nothing will be executed further in this iteration we want to * return PENDING if there are pending commands
*/ if (!bnx2x_exe_queue_empty(&o->exe_queue))
rc = 1;
if (test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags)) {
DP(BNX2X_MSG_SP, "RAMROD_DRV_CLR_ONLY requested: clearing a pending bit.\n");
raw->clear_pending(raw);
}
/* Execute commands if required */ if (cont || test_bit(RAMROD_EXEC, ramrod_flags) ||
test_bit(RAMROD_COMP_WAIT, ramrod_flags)) {
rc = __bnx2x_vlan_mac_execute_step(bp, p->vlan_mac_obj,
&p->ramrod_flags); if (rc < 0) return rc;
}
/* RAMROD_COMP_WAIT is a superset of RAMROD_EXEC. If it was set * then user want to wait until the last command is done.
*/ if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags)) { /* Wait maximum for the current exe_queue length iterations plus * one (for the current pending command).
*/ int max_iterations = bnx2x_exe_queue_length(&o->exe_queue) + 1;
while (!bnx2x_exe_queue_empty(&o->exe_queue) &&
max_iterations--) {
/* Wait for the current command to complete */
rc = raw->wait_comp(bp, raw); if (rc) return rc;
/* Make a next step */
rc = __bnx2x_vlan_mac_execute_step(bp,
p->vlan_mac_obj,
&p->ramrod_flags); if (rc < 0) return rc;
}
return 0;
}
return rc;
}
/** * bnx2x_vlan_mac_del_all - delete elements with given vlan_mac_flags spec * * @bp: device handle * @o: vlan object info * @vlan_mac_flags: vlan flags * @ramrod_flags: execution flags to be used for this deletion * * if the last operation has completed successfully and there are no * more elements left, positive value if the last operation has completed * successfully and there are more previously configured elements, negative * value is current operation has failed.
*/ staticint bnx2x_vlan_mac_del_all(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o, unsignedlong *vlan_mac_flags, unsignedlong *ramrod_flags)
{ struct bnx2x_vlan_mac_registry_elem *pos = NULL; struct bnx2x_vlan_mac_ramrod_params p; struct bnx2x_exe_queue_obj *exeq = &o->exe_queue; struct bnx2x_exeq_elem *exeq_pos, *exeq_pos_n; unsignedlong flags; int read_lock; int rc = 0;
/* Add all but the last VLAN-MAC to the execution queue without actually * execution anything.
*/
__clear_bit(RAMROD_COMP_WAIT, &p.ramrod_flags);
__clear_bit(RAMROD_EXEC, &p.ramrod_flags);
__clear_bit(RAMROD_CONT, &p.ramrod_flags);
/* CAM pool handling */
vlan_mac_obj->get_credit = bnx2x_get_credit_vlan_mac;
vlan_mac_obj->put_credit = bnx2x_put_credit_vlan_mac; /* CAM offset is relevant for 57710 and 57711 chips only which have a * single CAM for both MACs and VLAN-MAC pairs. So the offset * will be taken from MACs' pool object only.
*/
vlan_mac_obj->get_cam_offset = bnx2x_get_cam_offset_mac;
vlan_mac_obj->put_cam_offset = bnx2x_put_cam_offset_mac;
if (CHIP_IS_E1(bp)) {
BNX2X_ERR("Do not support chips others than E2\n");
BUG();
} elseif (CHIP_IS_E1H(bp)) {
vlan_mac_obj->set_one_rule = bnx2x_set_one_vlan_mac_e1h;
vlan_mac_obj->check_del = bnx2x_check_vlan_mac_del;
vlan_mac_obj->check_add = bnx2x_check_vlan_mac_add;
vlan_mac_obj->check_move = bnx2x_check_move_always_err;
vlan_mac_obj->ramrod_cmd = RAMROD_CMD_ID_ETH_SET_MAC;
/* In e1x there we only take into account rx accept flag since tx switching
* isn't enabled. */ if (test_bit(BNX2X_ACCEPT_UNICAST, &p->rx_accept_flags)) /* accept matched ucast */
drop_all_ucast = 0;
/* NOTE(review): the following German disclaimer is a web-page extraction
 * artifact and is not part of the driver source; wrapped in a comment so the
 * file remains compilable. Translation:
 * "The information on this website has been carefully compiled to the best
 * of our knowledge. However, neither completeness, nor correctness, nor
 * quality of the provided information is guaranteed.
 * Note: the colored syntax highlighting and the measurement are still
 * experimental."
 */