/* * Copyright (c) 2006-2008 Chelsio, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE.
*/
/**
 * cxgb3_register_client - register an offload client
 * @client: the client
 *
 * Walks the list of activated offload devices under cxgb3_db_lock and
 * invokes the client's @add callback for each one.
 *
 * NOTE(review): the original body called mutex_unlock() without a matching
 * mutex_lock(), an unlock-without-lock bug; the lock acquisition is restored
 * here so the list walk is protected and the lock/unlock calls balance.
 * The header comment also says the client is added to a client list, but no
 * such insertion appears in this block — TODO confirm against the full file.
 */
void cxgb3_register_client(struct cxgb3_client *client)
{
	struct t3cdev *tdev;

	mutex_lock(&cxgb3_db_lock);

	if (client->add) {
		/* Notify the client of every already-activated offload device. */
		list_for_each_entry(tdev, &ofld_dev_list, ofld_dev_list) {
			if (offload_activated(tdev))
				client->add(tdev);
		}
	}
	mutex_unlock(&cxgb3_db_lock);
}
EXPORT_SYMBOL(cxgb3_register_client);
/*
 * NOTE(review): this block is corrupted. The cxgb3_unregister_client()
 * signature below is fused to the body of what appears to be a ULP-iSCSI
 * control handler (it reads/writes A_ULPRX_ISCSI_* and A_TP_PARA_* registers
 * and uses `adapter`, `uiip`, `val`, `i`, `ret` — none of which are declared
 * here), and the enclosing `switch` statement is missing (`break`/`case`
 * labels appear with no switch header). Do not modify without recovering the
 * original two functions — TODO reconcile against upstream.
 */
/** * cxgb3_unregister_client - unregister an offload client * @client: the client * * Remove the client to the client list, * and call backs the client for each activated offload device.
*/ void cxgb3_unregister_client(struct cxgb3_client *client)
{ struct t3cdev *tdev;
/* NOTE(review): from here on the code belongs to a different function. */
/* Read the four per-page-size DDP factors packed one per byte. */
val = t3_read_reg(adapter, A_ULPRX_ISCSI_PSZ); for (i = 0; i < 4; i++, val >>= 8)
uiip->pgsz_factor[i] = val & 0xFF;
/* Max transfer sizes come from the PM max-transfer-length fields. */
val = t3_read_reg(adapter, A_TP_PARA_REG7);
uiip->max_txsz =
uiip->max_rxsz = min((val >> S_PMMAXXFERLEN0)&M_PMMAXXFERLEN0,
(val >> S_PMMAXXFERLEN1)&M_PMMAXXFERLEN1); /* * On tx, the iscsi pdu has to be <= tx page size and has to * fit into the Tx PM FIFO.
*/
val = min(adapter->params.tp.tx_pg_size,
t3_read_reg(adapter, A_PM1_TX_CFG) >> 17);
uiip->max_txsz = min(val, uiip->max_txsz);
/* set MaxRxData to 16224 */
val = t3_read_reg(adapter, A_TP_PARA_REG2); if ((val >> S_MAXRXDATA) != 0x3f60) {
val &= (M_RXCOALESCESIZE << S_RXCOALESCESIZE);
val |= V_MAXRXDATA(0x3f60);
pr_info("%s, iscsi set MaxRxData to 16224 (0x%x)\n",
adapter->name, val);
t3_write_reg(adapter, A_TP_PARA_REG2, val);
}
/* * on rx, the iscsi pdu has to be < rx page size and the * max rx data length programmed in TP
*/
val = min(adapter->params.tp.rx_pg_size,
((t3_read_reg(adapter, A_TP_PARA_REG2)) >>
S_MAXRXDATA) & M_MAXRXDATA);
uiip->max_rxsz = min(val, uiip->max_rxsz); break; case ULP_ISCSI_SET_PARAMS:
/* Program the DDP tag mask and, if changed, the page-size factors. */
t3_write_reg(adapter, A_ULPRX_ISCSI_TAGMASK, uiip->tagmask); /* program the ddp page sizes */ for (i = 0; i < 4; i++)
val |= (uiip->pgsz_factor[i] & 0xF) << (8 * i); if (val && (val != t3_read_reg(adapter, A_ULPRX_ISCSI_PSZ))) {
pr_info("%s, setting iscsi pgsz 0x%x, %u,%u,%u,%u\n",
adapter->name, val, uiip->pgsz_factor[0],
uiip->pgsz_factor[1], uiip->pgsz_factor[2],
uiip->pgsz_factor[3]);
t3_write_reg(adapter, A_ULPRX_ISCSI_PSZ, val);
} break; default:
ret = -EOPNOTSUPP;
} return ret;
}
/* Response queue used for RDMA events. */ #define ASYNC_NOTIF_RSPQ 0
/*
 * NOTE(review): this block is a truncated fragment of an RDMA control
 * dispatcher. Defects visible here: `staticint`/`unsignedint`/`elseif` are
 * fused tokens (will not compile), the opening `switch (req)` and the
 * declarations of `flags` and the first `rdma` pointer are missing, and the
 * function is cut off inside the RDMA_CTRL_QP_SETUP case. Do not modify
 * without recovering the complete function — TODO reconcile against upstream.
 */
staticint cxgb_rdma_ctl(struct adapter *adapter, unsignedint req, void *data)
{ int ret = 0;
/* may be called in any context */
spin_lock_irqsave(&adapter->sge.reg_lock, flags);
ret = t3_sge_cqcntxt_op(adapter, rdma->id, rdma->op,
rdma->credits);
spin_unlock_irqrestore(&adapter->sge.reg_lock, flags); break;
} case RDMA_GET_MEM:{ struct ch_mem_range *t = data; struct mc7 *mem;
/* Addresses and lengths must be 8-byte aligned for MC7 access. */
if ((t->addr & 7) || (t->len & 7)) return -EINVAL; if (t->mem_id == MEM_CM)
mem = &adapter->cm; elseif (t->mem_id == MEM_PMRX)
mem = &adapter->pmrx; elseif (t->mem_id == MEM_PMTX)
mem = &adapter->pmtx; else return -EINVAL;
ret =
t3_mc7_bd_read(mem, t->addr / 8, t->len / 8,
(u64 *) t->buf); if (ret) return ret; break;
} case RDMA_CQ_SETUP:{ struct rdma_cq_setup *rdma = data;
spin_lock_irq(&adapter->sge.reg_lock);
ret =
t3_sge_init_cqcntxt(adapter, rdma->id,
rdma->base_addr, rdma->size,
ASYNC_NOTIF_RSPQ,
rdma->ovfl_mode, rdma->credits,
rdma->credit_thres);
spin_unlock_irq(&adapter->sge.reg_lock); break;
} case RDMA_CQ_DISABLE:
spin_lock_irq(&adapter->sge.reg_lock);
ret = t3_sge_disable_cqcntxt(adapter, *(unsignedint *)data);
spin_unlock_irq(&adapter->sge.reg_lock); break; case RDMA_CTRL_QP_SETUP:{ struct rdma_ctrlqp_setup *rdma = data;
/*
 * Dummy handler for Rx offload packets in case we get an offload packet
 * before proper processing is set up. Frees every packet in the batch and
 * reports success; getting offload packets at this stage is not normal.
 * (NOTE(review): the original comment claimed it "complains", but no message
 * is logged — the packets are simply dropped.)
 *
 * Fixed here: `staticint` was a fused token that would not compile.
 */
static int rx_offload_blackhole(struct t3cdev *dev, struct sk_buff **skbs,
				int n)
{
	while (n--)
		dev_kfree_skb_any(skbs[n]);
	return 0;
}
/*
 * NOTE(review): orphan fragment — `td` is not declared in any visible scope
 * and the closing brace below belongs to a function whose beginning is
 * missing. It appears to replenish a pre-allocated "no-fail" skb sized for a
 * CPL_TID_RELEASE message after the reserve was consumed — TODO confirm by
 * recovering the enclosing function.
 */
if (!td->nofail_skb)
td->nofail_skb =
alloc_skb(sizeof(struct cpl_tid_release),
GFP_KERNEL);
}
/*
 * NOTE(review): truncated — only the signature and local declarations of
 * cxgb3_queue_tid_release() are present; its body (presumably linking the
 * TID entry onto a release list via the ctx pointer, per the comment) is
 * missing. TODO recover the body before modifying.
 */
/* use ctx as a next pointer in the tid release list */ void cxgb3_queue_tid_release(struct t3cdev *tdev, unsignedint tid)
{ struct t3c_data *td = T3C_DATA(tdev); struct t3c_tid_entry *p = &td->tid_maps.tid_tab[tid];
/*
 * NOTE(review): corrupted block. The cxgb3_remove_tid() signature below is
 * fused to a range-check fragment from a different CPL handler: it references
 * `dev` (undeclared here; the parameter is `tdev`) and returns
 * CPL_RET_BUF_DONE from a function declared void. Neither half compiles as
 * written — TODO reconcile against upstream.
 */
/* * Remove a tid from the TID table. A client may defer processing its last * CPL message if it is locked at the time it arrives, and while the message * sits in the client's backlog the TID may be reused for another connection. * To handle this we atomically switch the TID association if it still points * to the original client context.
*/ void cxgb3_remove_tid(struct t3cdev *tdev, void *ctx, unsignedint tid)
{ struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;
if (unlikely(tid >= t->ntids)) {
printk("%s: passive open TID %u too large\n",
dev->name, tid);
t3_fatal_err(tdev2adap(dev)); return CPL_RET_BUF_DONE;
}
/* * Returns an sk_buff for a reply CPL message of size len. If the input * sk_buff has no other users it is trimmed and reused, otherwise a new buffer * is allocated. The input skb must be of size at least len. Note that this * operation does not destroy the original skb data even if it decides to reuse * the buffer.
*/ staticstruct sk_buff *cxgb3_get_cpl_reply_skb(struct sk_buff *skb, size_t len,
gfp_t gfp)
{ if (likely(!skb_cloned(skb))) {
BUG_ON(skb->len < len);
__skb_trim(skb, len);
skb_get(skb);
} else {
skb = alloc_skb(len, gfp); if (skb)
__skb_put(skb, len);
} return skb;
}
/*
 * NOTE(review): orphan fragment from a CPL "active establish" handler —
 * `t`, `tid`, and `dev` are not declared in any visible scope and the
 * enclosing function is missing. It bounds-checks the TID and treats an
 * out-of-range value as a fatal adapter error. TODO recover the enclosing
 * function before modifying.
 */
if (unlikely(tid >= t->ntids)) {
printk("%s: active establish TID %u too large\n",
dev->name, tid);
t3_fatal_err(tdev2adap(dev)); return CPL_RET_BUF_DONE;
}
/* * That skb would better have come from process_responses() where we abuse * ->priority and ->csum to carry our data. NB: if we get to per-arch * ->csum, the things might get really interesting here.
*/
/*
 * Notifier block wired to nb_callback (defined elsewhere in this file —
 * not visible in this chunk).
 */
staticstruct notifier_block nb = {
	.notifier_call = nb_callback
};
/*
 * Process a received packet with an unknown/unexpected CPL opcode: log the
 * first message byte (the opcode) and tell the dispatcher to free the buffer
 * and count the message as bad.
 *
 * Fixed here: `staticint` was a fused token that would not compile.
 */
static int do_bad_cpl(struct t3cdev *dev, struct sk_buff *skb)
{
	pr_err("%s: received bad CPL command 0x%x\n", dev->name, *skb->data);
	return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
}
/* * Handlers for each CPL opcode
*/ static cpl_handler_func cpl_handlers[NUM_CPL_CMDS];
/* Dispatch table indexed by CPL opcode; entries are installed via
 * t3_register_cpl_handler() below. */
/*
 * Add a new handler to the CPL dispatch table. A NULL handler may be
 * supplied to unregister an existing handler, in which case the slot falls
 * back to do_bad_cpl. Out-of-range opcodes are rejected with an error log.
 *
 * Fixed here: `unsignedint` was a fused token that would not compile.
 */
void t3_register_cpl_handler(unsigned int opcode, cpl_handler_func h)
{
	if (opcode < NUM_CPL_CMDS)
		cpl_handlers[opcode] = h ? h : do_bad_cpl;
	else
		pr_err("T3C: handler registration for opcode %x failed\n",
		       opcode);
}
EXPORT_SYMBOL(t3_register_cpl_handler);
/* * T3CDEV's receive method.
*/ staticint process_rx(struct t3cdev *dev, struct sk_buff **skbs, int n)
{ while (n--) { struct sk_buff *skb = *skbs++; unsignedint opcode = get_opcode(skb); int ret = cpl_handlers[opcode] (dev, skb);
#if VALIDATE_TID if (ret & CPL_RET_UNKNOWN_TID) { union opcode_tid *p = cplhdr(skb);
pr_err("%s: CPL message (opcode %u) had unknown TID %u\n",
dev->name, opcode, G_TID(ntohl(p->opcode_tid)));
} #endif if (ret & CPL_RET_BUF_DONE)
kfree_skb(skb);
} return 0;
}
/*
 * Sends an sk_buff to a T3C driver after dealing with any active network
 * taps. Bottom halves are disabled around the device's send method.
 */
int cxgb3_ofld_send(struct t3cdev *dev, struct sk_buff *skb)
{
	int ret;

	local_bh_disable();
	ret = dev->send(dev, skb);
	local_bh_enable();

	return ret;
}
EXPORT_SYMBOL(cxgb3_ofld_send);
/*
 * NOTE(review): truncated — only the signature and local declarations of
 * is_offloading() are present (and `staticint` is a fused token that will
 * not compile). The body is missing. TODO recover before modifying.
 */
staticint is_offloading(struct net_device *dev)
{ struct adapter *adapter; int i;
/*
 * Map an adapter's hardware revision to its T3 chip type constant.
 * Returns 0 for any revision not listed in the switch.
 *
 * Fixed here: `staticinlineint` was a fused token that would not compile.
 */
static inline int adap2type(struct adapter *adapter)
{
	int type = 0;

	switch (adapter->params.rev) {
	case T3_REV_A:
		type = T3A;
		break;
	case T3_REV_B:
	case T3_REV_B2:
		type = T3B;
		break;
	case T3_REV_C:
		type = T3C;
		break;
	}
	return type;
}
/*
 * NOTE(review): the following text is unrelated website boilerplate that was
 * accidentally pasted into this source file; it is preserved here inside a
 * comment (translated from German) pending removal:
 * "The information on this website has been carefully compiled to the best
 * of our knowledge. However, neither the completeness, nor the correctness,
 * nor the quality of the information provided is guaranteed.
 * Note: the colored syntax highlighting and the measurement are still
 * experimental."
 */