/* * This file is part of the Chelsio T4 Ethernet driver for Linux. * * Copyright (c) 2016 Chelsio Communications, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE.
*/
/* Fill ch_filter_specification with parsed match value/mask pair.
 *
 * @adap: adapter the filter targets (not referenced in this helper as
 *        visible here; presumably kept for interface symmetry with the
 *        other fill_* helpers — TODO confirm against full file)
 * @fs: filter specification being populated
 * @cls: u32 classifier offload request carrying the selector keys
 * @entry: offset -> setter-callback table; terminated by an element
 *         whose .val callback is NULL
 * @next_header: when true, consume only keys that carry an offmask
 *               (keys belonging to a linked/next-header bucket);
 *               when false, consume only keys without an offmask
 *
 * Returns 0 on success, a negative error from a setter callback, or
 * -EINVAL when a key's offset has no entry in @entry (i.e. the field
 * cannot be offloaded).
 */
static int fill_match_fields(struct adapter *adap,
			     struct ch_filter_specification *fs,
			     struct tc_cls_u32_offload *cls,
			     const struct cxgb4_match_field *entry,
			     bool next_header)
{
	unsigned int i, j;
	__be32 val, mask;
	int off, err;
	bool found;

	for (i = 0; i < cls->knode.sel->nkeys; i++) {
		off = cls->knode.sel->keys[i].off;
		val = cls->knode.sel->keys[i].val;
		mask = cls->knode.sel->keys[i].mask;

		if (next_header) {
			/* For next headers, parse only keys with offmask */
			if (!cls->knode.sel->keys[i].offmask)
				continue;
		} else {
			/* For the remaining, parse only keys without offmask */
			if (cls->knode.sel->keys[i].offmask)
				continue;
		}

		found = false;

		/* Walk the NULL-terminated field table for this offset. */
		for (j = 0; entry[j].val; j++) {
			if (off == entry[j].off) {
				found = true;
				err = entry[j].val(fs, val, mask);
				if (err)
					return err;
				break;
			}
		}

		/* A key at an unknown offset cannot be offloaded; reject. */
		if (!found)
			return -EINVAL;
	}

	return 0;
}
/* Fill ch_filter_specification with parsed action. */ staticint fill_action_fields(struct adapter *adap, struct ch_filter_specification *fs, struct tc_cls_u32_offload *cls)
{ unsignedint num_actions = 0; conststruct tc_action *a; struct tcf_exts *exts; int i;
exts = cls->knode.exts; if (!tcf_exts_has_actions(exts)) return -EINVAL;
tcf_exts_for_each_action(i, a, exts) { /* Don't allow more than one action per rule. */ if (num_actions) return -EINVAL;
/* Drop in hardware. */ if (is_tcf_gact_shot(a)) {
fs->action = FILTER_DROP;
num_actions++; continue;
}
/* Re-direct to specified port in hardware. */ if (is_tcf_mirred_egress_redirect(a)) { struct net_device *n_dev, *target_dev; bool found = false; unsignedint i;
/* Get a free filter entry TID, where we can insert this new * rule. Only insert rule if its prio doesn't conflict with * existing rules.
*/
filter_id = cxgb4_get_free_ftid(dev, inet_family, false,
TC_U32_NODE(cls->knode.handle)); if (filter_id < 0) {
NL_SET_ERR_MSG_MOD(extack, "No free LETCAM index available"); return -ENOMEM;
}
t = adapter->tc_u32;
uhtid = TC_U32_USERHTID(cls->knode.handle);
link_uhtid = TC_U32_USERHTID(cls->knode.link_handle);
/* Ensure that uhtid is either root u32 (i.e. 0x800) * or a valid linked bucket.
*/ if (uhtid != 0x800 && uhtid >= t->size) return -EINVAL;
/* Ensure link handle uhtid is sane, if specified. */ if (link_uhtid >= t->size) return -EINVAL;
if (uhtid != 0x800) { /* Link must exist from root node before insertion. */ if (!t->table[uhtid - 1].link_handle) return -EINVAL;
/* Link must have a valid supported next header. */
link_start = t->table[uhtid - 1].match_field; if (!link_start) return -EINVAL;
}
/* Parse links and record them for subsequent jumps to valid * next headers.
*/ if (link_uhtid) { conststruct cxgb4_next_header *next; bool found = false; unsignedint i, j;
__be32 val, mask; int off;
next = is_ipv6 ? cxgb4_ipv6_jumps : cxgb4_ipv4_jumps;
/* Try to find matches that allow jumps to next header. */ for (i = 0; next[i].jump; i++) { if (next[i].sel.offoff != cls->knode.sel->offoff ||
next[i].sel.offshift != cls->knode.sel->offshift ||
next[i].sel.offmask != cls->knode.sel->offmask ||
next[i].sel.off != cls->knode.sel->off) continue;
/* Found a possible candidate. Find a key that * matches the corresponding offset, value, and * mask to jump to next header.
*/ for (j = 0; j < cls->knode.sel->nkeys; j++) {
off = cls->knode.sel->keys[j].off;
val = cls->knode.sel->keys[j].val;
mask = cls->knode.sel->keys[j].mask;
if (next[i].key.off == off &&
next[i].key.val == val &&
next[i].key.mask == mask) {
found = true; break;
}
}
if (!found) continue; /* Try next candidate. */
/* Candidate to jump to next header found. * Translate all keys to internal specification * and store them in jump table. This spec is copied * later to set the actual filters.
*/
ret = fill_match_fields(adapter, &fs, cls,
start, false); if (ret) goto out;
/* No candidate found to jump to next header. */ if (!found) return -EINVAL;
return 0;
}
/* Fill ch_filter_specification match fields to be shipped to hardware. * Copy the linked spec (if any) first. And then update the spec as * needed.
*/ if (uhtid != 0x800 && t->table[uhtid - 1].link_handle) { /* Copy linked ch_filter_specification */
memcpy(&fs, &t->table[uhtid - 1].fs, sizeof(fs));
ret = fill_match_fields(adapter, &fs, cls,
link_start, true); if (ret) goto out;
}
ret = fill_match_fields(adapter, &fs, cls, start, false); if (ret) goto out;
/* Fill ch_filter_specification action fields to be shipped to * hardware.
*/
ret = fill_action_fields(adapter, &fs, cls); if (ret) goto out;
/* The filter spec has been completely built from the info * provided from u32. We now set some default fields in the * spec for sanity.
*/
/* Match only packets coming from the ingress port where this * filter will be created.
*/
fs.val.iport = netdev2pinfo(dev)->port_id;
fs.mask.iport = ~0;
/* Enable filter hit counts. */
fs.hitcnts = 1;
/* Set type of filter - IPv6 or IPv4 */
fs.type = is_ipv6 ? 1 : 0;
/* Set the filter */
ret = cxgb4_set_filter(dev, filter_id, &fs); if (ret) goto out;
/* If this is a linked bucket, then set the corresponding * entry in the bitmap to mark it as belonging to this linked * bucket.
*/ if (uhtid != 0x800 && t->table[uhtid - 1].link_handle)
set_bit(filter_id, t->table[uhtid - 1].tid_map);
out: return ret;
}
int cxgb4_delete_knode(struct net_device *dev, struct tc_cls_u32_offload *cls)
{ struct adapter *adapter = netdev2adap(dev); unsignedint filter_id, max_tids, i, j; struct cxgb4_link *link = NULL; struct cxgb4_tc_u32_table *t; struct filter_entry *f; bool found = false;
u32 handle, uhtid;
u8 nslots; int ret;
if (!can_tc_u32_offload(dev)) return -EOPNOTSUPP;
/* Fetch the location to delete the filter. */
max_tids = adapter->tids.nhpftids + adapter->tids.nftids;
spin_lock_bh(&adapter->tids.ftid_lock);
filter_id = 0; while (filter_id < max_tids) { if (filter_id < adapter->tids.nhpftids) {
i = filter_id;
f = &adapter->tids.hpftid_tab[i]; if (f->valid && f->fs.tc_cookie == cls->knode.handle) {
found = true; break;
}
i = find_next_bit(adapter->tids.hpftid_bmap,
adapter->tids.nhpftids, i + 1); if (i >= adapter->tids.nhpftids) {
filter_id = adapter->tids.nhpftids; continue;
}
filter_id = i;
} else {
i = filter_id - adapter->tids.nhpftids;
f = &adapter->tids.ftid_tab[i]; if (f->valid && f->fs.tc_cookie == cls->knode.handle) {
found = true; break;
}
i = find_next_bit(adapter->tids.ftid_bmap,
adapter->tids.nftids, i + 1); if (i >= adapter->tids.nftids) break;
filter_id = i + adapter->tids.nhpftids;
}
nslots = 0; if (f->fs.type) {
nslots++; if (CHELSIO_CHIP_VERSION(adapter->params.chip) <
CHELSIO_T6)
nslots += 2;
}
/* NOTE(review): the text below is website-disclaimer boilerplate (originally
 * in German) that was accidentally pasted into this source file, truncating
 * cxgb4_delete_knode() above. It does not belong here and should be removed
 * and the missing function body restored from upstream. Preserved in English
 * translation inside this comment so the file at least remains parseable:
 *
 *   "The information on this web page has been compiled carefully and to
 *    the best of our knowledge. However, neither completeness, nor
 *    correctness, nor quality of the provided information is guaranteed.
 *    Note: the colored syntax highlighting and the measurement are still
 *    experimental."
 */