// SPDX-License-Identifier: GPL-2.0-or-later /* * Authors: * Copyright 2001, 2002 by Robert Olsson <robert.olsson@its.uu.se> * Uppsala University and * Swedish University of Agricultural Sciences * * Alexey Kuznetsov <kuznet@ms2.inr.ac.ru> * Ben Greear <greearb@candelatech.com> * Jens Låås <jens.laas@data.slu.se> * * A tool for loading the network with preconfigurated packets. * The tool is implemented as a linux module. Parameters are output * device, delay (to hard_xmit), number of packets, and whether * to use multiple SKBs or just the same one. * pktgen uses the installed interface's output routine. * * Additional hacking by: * * Jens.Laas@data.slu.se * Improved by ANK. 010120. * Improved by ANK even more. 010212. * MAC address typo fixed. 010417 --ro * Integrated. 020301 --DaveM * Added multiskb option 020301 --DaveM * Scaling of results. 020417--sigurdur@linpro.no * Significant re-work of the module: * * Convert to threaded model to more efficiently be able to transmit * and receive on multiple interfaces at once. * * Converted many counters to __u64 to allow longer runs. * * Allow configuration of ranges, like min/max IP address, MACs, * and UDP-ports, for both source and destination, and can * set to use a random distribution or sequentially walk the range. * * Can now change most values after starting. * * Place 12-byte packet in UDP payload with magic number, * sequence number, and timestamp. * * Add receiver code that detects dropped pkts, re-ordered pkts, and * latencies (with micro-second) precision. * * Add IOCTL interface to easily get counters & configuration. * --Ben Greear <greearb@candelatech.com> * * Renamed multiskb to clone_skb and cleaned up sending core for two distinct * skb modes. A clone_skb=0 mode for Ben "ranges" work and a clone_skb != 0 * as a "fastpath" with a configurable number of clones after alloc's. * clone_skb=0 means all packets are allocated this also means ranges time * stamps etc can be used. 
clone_skb=100 means 1 malloc is followed by 100 * clones. * * Also moved to /proc/net/pktgen/ * --ro * * Sept 10: Fixed threading/locking. Lots of bone-headed and more clever * mistakes. Also merged in DaveM's patch in the -pre6 patch. * --Ben Greear <greearb@candelatech.com> * * Integrated to 2.5.x 021029 --Lucio Maciel (luciomaciel@zipmail.com.br) * * 021124 Finished major redesign and rewrite for new functionality. * See Documentation/networking/pktgen.rst for how to use this. * * The new operation: * For each CPU one thread/process is created at start. This process checks * for running devices in the if_list and sends packets until count is 0 it * also the thread checks the thread->control which is used for inter-process * communication. controlling process "posts" operations to the threads this * way. * The if_list is RCU protected, and the if_lock remains to protect updating * of if_list, from "add_device" as it invoked from userspace (via proc write). * * By design there should only be *one* "controlling" process. In practice * multiple write accesses gives unpredictable result. Understood by "write" * to /proc gives result code that should be read be the "writer". * For practical use this should be no problem. * * Note when adding devices to a specific CPU there good idea to also assign * /proc/irq/XX/smp_affinity so TX-interrupts gets bound to the same CPU. * --ro * * Fix refcount off by one if first packet fails, potential null deref, * memleak 030710- KJP * * First "ranges" functionality for ipv6 030726 --ro * * Included flow support. 030802 ANK. * * Fixed unaligned access on IA-64 Grant Grundler <grundler@parisc-linux.org> * * Remove if fix from added Harald Welte <laforge@netfilter.org> 040419 * ia64 compilation fix from Aron Griffis <aron@hp.com> 040604 * * New xmit() return, do_div and misc clean up by Stephen Hemminger * <shemminger@osdl.org> 040923 * * Randy Dunlap fixed u64 printk compiler warning * * Remove FCS from BW calculation. 
Lennert Buytenhek <buytenh@wantstofly.org> * New time handling. Lennert Buytenhek <buytenh@wantstofly.org> 041213 * * Corrections from Nikolai Malykh (nmalykh@bilim.com) * Removed unused flags F_SET_SRCMAC & F_SET_SRCIP 041230 * * interruptible_sleep_on_timeout() replaced Nishanth Aravamudan <nacc@us.ibm.com> * 050103 * * MPLS support by Steven Whitehouse <steve@chygwyn.com> * * 802.1Q/Q-in-Q support by Francesco Fondelli (FF) <francesco.fondelli@gmail.com> * * Fixed src_mac command to set source mac of packet to value specified in * command by Adit Ranadive <adit.262@gmail.com>
*/
/* pktgen module version string. */
#define VERSION			"2.75"

/* Size of the buffers used to hold interface/IP name strings. */
#define IP_NAME_SZ		32

/* This is the max label stack depth */
#define MAX_MPLS_LABELS		16
/* Bottom-of-stack (S) bit of an MPLS label entry, in network byte order. */
#define MPLS_STACK_BOTTOM	htonl(0x00000100)

/* Max number of internet mix entries that can be specified in imix_weights. */
#define MAX_IMIX_ENTRIES	20
/* Precision of IMIX distribution */
#define IMIX_PRECISION		100

/* flow flag bits */
#define F_INIT			(1<<0)	/* flow has been initialized */
/*
 * Per-device pktgen state: configuration, counters and run-time state for
 * one network interface under test.
 *
 * NOTE(review): the closing "};" of this struct is not visible in this
 * chunk, and members referenced later in the file (src_mac, cur_udp_src,
 * result, ...) do not appear here — the definition looks truncated by
 * extraction; confirm against upstream net/core/pktgen.c.
 * NOTE(review): "unsignedint" below looks like a lost space
 * ("unsigned int") from extraction — kept verbatim.
 */
struct pktgen_dev {
	/*
	 * Try to keep frequent/infrequent used vars. separated.
	 */
	struct proc_dir_entry *entry;	/* proc file */
	struct pktgen_thread *pg_thread;/* the owner */
	struct list_head list;		/* chaining in the thread's run-queue */
	struct rcu_head rcu;		/* freed by RCU */

	int running;		/* if false, the test will stop */

	/* If min != max, then we will either do a linear iteration, or
	 * we will do a random selection from within the range.
	 */
	__u32 flags;
	int xmit_mode;
	int min_pkt_size;
	int max_pkt_size;
	int pkt_overhead;	/* overhead for MPLS, VLANs, IPSEC etc */
	int nfrags;
	int removal_mark;	/* non-zero => the device is marked for
				 * removal by worker thread
				 */
	struct page *page;
	u64 delay;		/* nano-seconds */

	__u64 count;		/* Default No packets to send */
	__u64 sofar;		/* How many pkts we've sent so far */
	__u64 tx_bytes;		/* How many bytes we've transmitted */
	__u64 errors;		/* Errors when trying to transmit, */

	/* runtime counters relating to clone_skb */

	__u32 clone_count;
	int last_ok;		/* Was last skb sent?
				 * Or a failed transmit of some sort?
				 * This will keep sequence numbers in order
				 */
	ktime_t next_tx;
	ktime_t started_at;
	ktime_t stopped_at;
	u64 idle_acc;		/* nano-seconds */

	__u32 seq_num;

	int clone_skb;		/*
				 * Use multiple SKBs during packet gen.
				 * If this number is greater than 1, then
				 * that many copies of the same packet will be
				 * sent before a new packet is allocated.
				 * If you want to send 1024 identical packets
				 * before creating a new packet,
				 * set clone_skb to 1024.
				 */

	/* If we're doing ranges, random or incremental, then this
	 * defines the min/max for those ranges.
	 */
	__be32 saddr_min;	/* inclusive, source IP address */
	__be32 saddr_max;	/* exclusive, source IP address */
	__be32 daddr_min;	/* inclusive, dest IP address */
	__be32 daddr_max;	/* exclusive, dest IP address */

	__u16 udp_src_min;	/* inclusive, source UDP port */
	__u16 udp_src_max;	/* exclusive, source UDP port */
	__u16 udp_dst_min;	/* inclusive, dest UDP port */
	__u16 udp_dst_max;	/* exclusive, dest UDP port */

	/* DSCP + ECN */
	__u8 tos;		/* six MSB of (former) IPv4 TOS
				 * are for dscp codepoint
				 */
	__u8 traffic_class;	/* ditto for the (former) Traffic Class in IPv6
				 * (see RFC 3260, sec. 4)
				 */

	/* IMIX */
	unsignedint n_imix_entries;
	struct imix_pkt imix_entries[MAX_IMIX_ENTRIES];
	/* Maps 0-IMIX_PRECISION range to imix_entry based on probability*/
	__u8 imix_distribution[IMIX_PRECISION];

	/* MPLS */
	unsignedint nr_labels;	/* Depth of stack, 0 = no MPLS */
	__be32 labels[MAX_MPLS_LABELS];

	/* VLAN/SVLAN (802.1Q/Q-in-Q) */
	__u8 vlan_p;
	__u8 vlan_cfi;
	__u16 vlan_id;		/* 0xffff means no vlan tag */

	__u8 svlan_p;
	__u8 svlan_cfi;
	__u16 svlan_id;		/* 0xffff means no svlan tag */

	__u32 src_mac_count;	/* How many MACs to iterate through */
	__u32 dst_mac_count;	/* How many MACs to iterate through */

	/* Pre-built Ethernet header; bytes 6..11 (source MAC) are filled
	 * in later.
	 */
	__u8 hh[14];
	/* = {
	 *    0x00, 0x80, 0xC8, 0x79, 0xB3, 0xCB,
	 *
	 *    We fill in SRC address later
	 *    0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	 *    0x08, 0x00
	 *    };
	 */
	__u16 pad;		/* pad out the hh struct to an even 16 bytes */

	struct sk_buff *skb;	/* skb we are to transmit next, used for when we
				 * are transmitting the same one multiple times
				 */
	struct net_device *odev; /* The out-going device.
				  * Note that the device should have it's
				  * pg_info pointer pointing back to this
				  * device.
				  * Set when the user specifies the out-going
				  * device name (not when the inject is
				  * started as it used to do.)
				  */
	netdevice_tracker dev_tracker;
	char odevname[32];
	struct flow_state *flows;
	unsignedint cflows;	/* Concurrent flows (config) */
	unsignedint lflow;	/* Flow length (config) */
	unsignedint nflows;	/* accumulated flows (stats) */
	unsignedint curfl;	/* current sequenced flow (state)*/

	u16 queue_map_min;
	u16 queue_map_max;
	__u32 skb_priority;	/* skb priority field */
	unsignedint burst;	/* number of duplicated packets to burst */
	int node;		/* Memory node */
/* Parses imix entries from user buffer.
 * The user buffer should consist of imix entries separated by spaces
 * where each entry consists of size and weight delimited by commas.
 * "size1,weight_1 size2,weight_2 ... size_n,weight_n" for example.
 *
 * NOTE(review): "constchar" / "unsignedlong" below look like lost spaces
 * from extraction — kept verbatim.
 */
static ssize_t get_imix_entries(constchar __user *buffer,
				size_t maxlen, struct pktgen_dev *pkt_dev)
{
	size_t i = 0, max;
	ssize_t len;
	char c;

	pkt_dev->n_imix_entries = 0;

	do {
		unsignedlong weight;
		unsignedlong size;

		if (pkt_dev->n_imix_entries >= MAX_IMIX_ENTRIES)
			return -E2BIG;
		if (i >= maxlen)
			return -EINVAL;

		max = min(10, maxlen - i);
		len = num_arg(&buffer[i], max, &size);
		if (len < 0)
			return len;
		i += len;
		if (i >= maxlen)
			return -EINVAL;
		if (get_user(c, &buffer[i]))
			return -EFAULT;
		/* Check for comma between size_i and weight_i */
		if (c != ',')
			return -EINVAL;
		i++;
		if (i >= maxlen)
			return -EINVAL;

		/* clamp size to 14 + 20 + 8 — presumably the minimum
		 * Ethernet + IPv4 + UDP header length; confirm upstream
		 */
		if (size < 14 + 20 + 8)
			size = 14 + 20 + 8;

		max = min(10, maxlen - i);
		len = num_arg(&buffer[i], max, &weight);
		if (len < 0)
			return len;
		if (weight <= 0)
			return -EINVAL;

		/*
		 * NOTE(review): the lines from here to the end of this loop
		 * reference tmp, n, pkt_dev->labels[] and MPLS flags — they
		 * belong to the MPLS label parser (get_labels() in upstream
		 * pktgen), not to get_imix_entries(). The chunk appears
		 * spliced mid-function by extraction; do not treat this as
		 * one coherent function.
		 */
		max = min(8, maxlen - i);
		len = hex32_arg(&buffer[i], max, &tmp);
		if (len < 0)
			return len;
		/* return empty list in case of invalid input or zero value */
		if (len == 0 || tmp == 0)
			return maxlen;
		pkt_dev->labels[n] = htonl(tmp);
		if (pkt_dev->labels[n] & MPLS_STACK_BOTTOM)
			pkt_dev->flags |= F_MPLS_RND;
		i += len;
		n++;
		if (i >= maxlen)
			break;
		if (get_user(c, &buffer[i]))
			return -EFAULT;
		i++;
	} while (c == ',');
/*
 * NOTE(review): fragment from the middle of the /proc write handler
 * (pktgen_if_write() in upstream pktgen); the function prologue that
 * declares name/value/i/count/pg_result is not visible in this chunk.
 * "unsignedlonglong" looks like lost spaces from extraction — kept verbatim.
 */
		sprintf(pg_result, "OK: delay=%llu",
			(unsignedlonglong) pkt_dev->delay);
		return count;
	}
	if (!strcmp(name, "rate")) {
		max = min(10, count - i);
		len = num_arg(&user_buffer[i], max, &value);
		if (len < 0)
			return len;
		if (!value)
			return -EINVAL;
		/* derive per-packet delay from requested rate; given
		 * min_pkt_size*8*NSEC_PER_USEC the unit is presumably Mb/s —
		 * confirm against pktgen.rst
		 */
		pkt_dev->delay = pkt_dev->min_pkt_size*8*NSEC_PER_USEC/value;
		if (debug)
			pr_info("Delay set at: %llu ns\n", pkt_dev->delay);

		sprintf(pg_result, "OK: rate=%lu", value);
		return count;
	}
	if (!strcmp(name, "ratep")) {
		max = min(10, count - i);
		len = num_arg(&user_buffer[i], max, &value);
		if (len < 0)
			return len;
		if (!value)
			return -EINVAL;
		/* value is packets per second -> inter-packet delay in ns */
		pkt_dev->delay = NSEC_PER_SEC/value;
		if (debug)
			pr_info("Delay set at: %llu ns\n", pkt_dev->delay);

		/* NOTE(review): reply says "rate=" not "ratep=" — kept
		 * verbatim; matches a long-standing upstream quirk.
		 */
		sprintf(pg_result, "OK: rate=%lu", value);
		return count;
	}
	if (!strcmp(name, "udp_src_min")) {
		max = min(10, count - i);
		len = num_arg(&user_buffer[i], max, &value);
		if (len < 0)
			return len;
		/* keep the iterator (cur_udp_src) in sync with a new min */
		if (value != pkt_dev->udp_src_min) {
			pkt_dev->udp_src_min = value;
			pkt_dev->cur_udp_src = value;
		}
		sprintf(pg_result, "OK: udp_src_min=%u", pkt_dev->udp_src_min);
		return count;
	}
	if (!strcmp(name, "udp_dst_min")) {
		max = min(10, count - i);
		len = num_arg(&user_buffer[i], max, &value);
		if (len < 0)
			return len;
		if (value != pkt_dev->udp_dst_min) {
			pkt_dev->udp_dst_min = value;
			pkt_dev->cur_udp_dst = value;
		}
		sprintf(pg_result, "OK: udp_dst_min=%u", pkt_dev->udp_dst_min);
		return count;
	}
	if (!strcmp(name, "udp_src_max")) {
		max = min(10, count - i);
		len = num_arg(&user_buffer[i], max, &value);
		if (len < 0)
			return len;
		if (value != pkt_dev->udp_src_max) {
			pkt_dev->udp_src_max = value;
			pkt_dev->cur_udp_src = value;
		}
		sprintf(pg_result, "OK: udp_src_max=%u", pkt_dev->udp_src_max);
		return count;
	}
	if (!strcmp(name, "udp_dst_max")) {
		max = min(10, count - i);
		len = num_arg(&user_buffer[i], max, &value);
		if (len < 0)
			return len;
		if (value != pkt_dev->udp_dst_max) {
			pkt_dev->udp_dst_max = value;
			pkt_dev->cur_udp_dst = value;
		}
		sprintf(pg_result, "OK: udp_dst_max=%u", pkt_dev->udp_dst_max);
		return count;
	}
	if (!strcmp(name, "clone_skb")) {
		max = min(10, count - i);
		len = num_arg(&user_buffer[i], max, &value);
		if (len < 0)
			return len;
		/* clone_skb is not supported for netif_receive xmit_mode and
		 * IMIX mode.
		 */
		if ((value > 0) &&
		    ((pkt_dev->xmit_mode == M_NETIF_RECEIVE) ||
		     !(pkt_dev->odev->priv_flags & IFF_TX_SKB_SHARING)))
			return -EOPNOTSUPP;
		if (value > 0 && (pkt_dev->n_imix_entries > 0 ||
				  !(pkt_dev->flags & F_SHARED)))
			return -EINVAL;

		pkt_dev->clone_skb = value;

		sprintf(pg_result, "OK: clone_skb=%d", pkt_dev->clone_skb);
		return count;
	}
	if (!strcmp(name, "count")) {
		max = min(10, count - i);
		len = num_arg(&user_buffer[i], max, &value);
		if (len < 0)
			return len;
		pkt_dev->count = value;
		sprintf(pg_result, "OK: count=%llu",
			(unsignedlonglong)pkt_dev->count);
		return count;
	}
/*
 * NOTE(review): continuation of the pktgen_if_write() fragment. In the
 * "flag" branch below, the "} else {" that upstream places before the
 * unknown-flag reply (and the "OK: flags=..." success reply) appear to have
 * been dropped by extraction — the braces here do not balance as shown.
 * "elseif" looks like a lost space ("else if") — kept verbatim.
 */
	if (!strcmp(name, "src_mac_count")) {
		max = min(10, count - i);
		len = num_arg(&user_buffer[i], max, &value);
		if (len < 0)
			return len;
		/* restart the MAC walk when the range size changes */
		if (pkt_dev->src_mac_count != value) {
			pkt_dev->src_mac_count = value;
			pkt_dev->cur_src_mac_offset = 0;
		}
		sprintf(pg_result, "OK: src_mac_count=%d",
			pkt_dev->src_mac_count);
		return count;
	}
	if (!strcmp(name, "dst_mac_count")) {
		max = min(10, count - i);
		len = num_arg(&user_buffer[i], max, &value);
		if (len < 0)
			return len;
		if (pkt_dev->dst_mac_count != value) {
			pkt_dev->dst_mac_count = value;
			pkt_dev->cur_dst_mac_offset = 0;
		}
		sprintf(pg_result, "OK: dst_mac_count=%d",
			pkt_dev->dst_mac_count);
		return count;
	}
	if (!strcmp(name, "burst")) {
		max = min(10, count - i);
		len = num_arg(&user_buffer[i], max, &value);
		if (len < 0)
			return len;
		/* bursting keeps a reference to the skb, so it must be shared */
		if (value > 1 && !(pkt_dev->flags & F_SHARED))
			return -EINVAL;
		pkt_dev->burst = value < 1 ? 1 : value;
		sprintf(pg_result, "OK: burst=%u", pkt_dev->burst);
		return count;
	}
	if (!strcmp(name, "node")) {
		max = min(10, count - i);
		len = num_arg(&user_buffer[i], max, &value);
		if (len < 0)
			return len;
		if (node_possible(value)) {
			pkt_dev->node = value;
			sprintf(pg_result, "OK: node=%d", pkt_dev->node);
			/* drop the page cached for the previous node */
			if (pkt_dev->page) {
				put_page(pkt_dev->page);
				pkt_dev->page = NULL;
			}
		} else {
			sprintf(pg_result, "ERROR: node not possible");
		}
		return count;
	}
	if (!strcmp(name, "xmit_mode")) {
		char f[32];

		max = min(sizeof(f) - 1, count - i);
		len = strn_len(&user_buffer[i], max);
		if (len < 0)
			return len;

		memset(f, 0, sizeof(f));
		if (copy_from_user(f, &user_buffer[i], len))
			return -EFAULT;

		if (strcmp(f, "start_xmit") == 0) {
			pkt_dev->xmit_mode = M_START_XMIT;
		} elseif (strcmp(f, "netif_receive") == 0) {
			/* clone_skb set earlier, not supported in this mode */
			if (pkt_dev->clone_skb > 0)
				return -EOPNOTSUPP;
			pkt_dev->xmit_mode = M_NETIF_RECEIVE;

			/* make sure new packet is allocated every time
			 * pktgen_xmit() is called
			 */
			pkt_dev->last_ok = 1;
		} elseif (strcmp(f, "queue_xmit") == 0) {
			pkt_dev->xmit_mode = M_QUEUE_XMIT;
			pkt_dev->last_ok = 1;
		} else {
			/* NOTE(review): the help text omits "queue_xmit"
			 * although it is accepted above — kept verbatim.
			 */
			sprintf(pg_result,
				"xmit_mode -:%s:- unknown\nAvailable modes: %s",
				f, "start_xmit, netif_receive\n");
			return count;
		}
		sprintf(pg_result, "OK: xmit_mode=%s", f);
		return count;
	}
	if (!strcmp(name, "flag")) {
		bool disable = false;
		__u32 flag;
		char f[32];
		char *end;

		max = min(sizeof(f) - 1, count - i);
		len = strn_len(&user_buffer[i], max);
		if (len < 0)
			return len;

		memset(f, 0, 32);
		if (copy_from_user(f, &user_buffer[i], len))
			return -EFAULT;

		flag = pktgen_read_flag(f, &disable);
		if (flag) {
			if (disable) {
				/* If "clone_skb", or "burst" parameters are
				 * configured, it means that the skb still
				 * needs to be referenced by the pktgen, so
				 * the skb must be shared.
				 */
				if (flag == F_SHARED && (pkt_dev->clone_skb ||
							 pkt_dev->burst > 1))
					return -EINVAL;
				pkt_dev->flags &= ~flag;
			} else {
				pkt_dev->flags |= flag;
			}
		/* Unknown flag */
		/* (see NOTE above: the "} else {" introducing this path is
		 * missing from this chunk)
		 */
		end = pkt_dev->result + sizeof(pkt_dev->result);
		pg_result += sprintf(pg_result,
			"Flag -:%s:- unknown\n"
			"Available flags, (prepend ! to un-set flag):\n", f);
		for (int n = 0; n < NR_PKT_FLAGS && pg_result < end; n++) {
			if (!IS_ENABLED(CONFIG_XFRM) && n == IPSEC_SHIFT)
				continue;
			pg_result += snprintf(pg_result, end - pg_result,
					      "%s, ", pkt_flag_names[n]);
		}
		if (!WARN_ON_ONCE(pg_result >= end)) {
			/* Remove the comma and whitespace at the end */
			*(pg_result - 2) = '\0';
		}
		return count;
	}
	}
	/* NOTE(review): continuation of the pktgen_if_write() fragment;
	 * truncated mid-branch ("dst6") at the end of this chunk.
	 */
	if (!strcmp(name, "dst_min") || !strcmp(name, "dst")) {
		max = min(sizeof(pkt_dev->dst_min) - 1, count - i);
		len = strn_len(&user_buffer[i], max);
		if (len < 0)
			return len;
		if (copy_from_user(buf, &user_buffer[i], len))
			return -EFAULT;
		buf[len] = 0;
		/* only reset the iterator when the string actually changed */
		if (strcmp(buf, pkt_dev->dst_min) != 0) {
			strscpy_pad(pkt_dev->dst_min, buf);
			pkt_dev->daddr_min = in_aton(pkt_dev->dst_min);
			pkt_dev->cur_daddr = pkt_dev->daddr_min;
		}
		if (debug)
			pr_debug("dst_min set to: %s\n", pkt_dev->dst_min);
		sprintf(pg_result, "OK: dst_min=%s", pkt_dev->dst_min);
		return count;
	}
	if (!strcmp(name, "dst_max")) {
		max = min(sizeof(pkt_dev->dst_max) - 1, count - i);
		len = strn_len(&user_buffer[i], max);
		if (len < 0)
			return len;
		if (copy_from_user(buf, &user_buffer[i], len))
			return -EFAULT;
		buf[len] = 0;
		if (strcmp(buf, pkt_dev->dst_max) != 0) {
			strscpy_pad(pkt_dev->dst_max, buf);
			pkt_dev->daddr_max = in_aton(pkt_dev->dst_max);
			/* note: restarts iteration from daddr_max here */
			pkt_dev->cur_daddr = pkt_dev->daddr_max;
		}
		if (debug)
			pr_debug("dst_max set to: %s\n", pkt_dev->dst_max);
		sprintf(pg_result, "OK: dst_max=%s", pkt_dev->dst_max);
		return count;
	}
	if (!strcmp(name, "dst6")) {
		max = min(sizeof(buf) - 1, count - i);
		len = strn_len(&user_buffer[i], max);
		if (len < 0)
			return len;

		/* setting a v6 destination switches the device to IPv6 mode */
		pkt_dev->flags |= F_IPV6;

		if (copy_from_user(buf, &user_buffer[i], len))
			return -EFAULT;
		buf[len] = 0;
/* Read pkt_dev from the interface and set up internal pktgen_dev
 * structure to have the right information to create/send packets
 *
 * NOTE(review): "staticvoid" looks like a lost space ("static void") from
 * extraction — kept verbatim. The function is truncated at the end of this
 * chunk, mid-way into its IPv6 branch.
 */
staticvoid pktgen_setup_inject(struct pktgen_dev *pkt_dev)
{
	int ntxq;

	if (!pkt_dev->odev) {
		pr_err("ERROR: pkt_dev->odev == NULL in setup_inject\n");
		sprintf(pkt_dev->result,
			"ERROR: pkt_dev->odev == NULL in setup_inject.\n");
		return;
	}

	/* make sure that we don't pick a non-existing transmit queue */
	ntxq = pkt_dev->odev->real_num_tx_queues;

	if (ntxq <= pkt_dev->queue_map_min) {
		pr_warn("WARNING: Requested queue_map_min (zero-based) (%d) exceeds valid range [0 - %d] for (%d) queues on %s, resetting\n",
			pkt_dev->queue_map_min, (ntxq ?: 1) - 1, ntxq,
			pkt_dev->odevname);
		pkt_dev->queue_map_min = (ntxq ?: 1) - 1;
	}
	if (pkt_dev->queue_map_max >= ntxq) {
		pr_warn("WARNING: Requested queue_map_max (zero-based) (%d) exceeds valid range [0 - %d] for (%d) queues on %s, resetting\n",
			pkt_dev->queue_map_max, (ntxq ?: 1) - 1, ntxq,
			pkt_dev->odevname);
		pkt_dev->queue_map_max = (ntxq ?: 1) - 1;
	}

	/* Default to the interface's mac if not explicitly set. */

	if (is_zero_ether_addr(pkt_dev->src_mac))
		ether_addr_copy(&(pkt_dev->hh[6]), pkt_dev->odev->dev_addr);

	/* Set up Dest MAC */
	ether_addr_copy(&(pkt_dev->hh[0]), pkt_dev->dst_mac);

	if (pkt_dev->flags & F_IPV6) {
		int i, set = 0, err = 1;
		struct inet6_dev *idev;
		/* NOTE(review): chunk truncated here by extraction. */
/*
 * NOTE(review): fragment from the middle of pktgen's delay helper
 * (spin() in upstream pktgen) — neither the function head nor its tail is
 * visible in this chunk; remaining/start_time/end_time/t/spin_until are
 * declared outside this view.
 */
	remaining = ktime_to_ns(hrtimer_expires_remaining(&t.timer));
	if (remaining <= 0)
		goto out;

	start_time = ktime_get();
	if (remaining < 100000) {
		/* for small delays (<100us), just loop until limit is reached */
		do {
			end_time = ktime_get();
		} while (ktime_compare(end_time, spin_until) < 0);
	} else {
		do {
			set_current_state(TASK_INTERRUPTIBLE);
			hrtimer_sleeper_start_expires(&t, HRTIMER_MODE_ABS);
/* If there was already an IPSEC SA, we keep it as is, else
 * we go look for it ...
 *
 * NOTE(review): "staticvoid" looks like a lost space from extraction —
 * kept verbatim. The function's closing braces / #endif are not visible in
 * this chunk (truncated).
 */
#define DUMMY_MARK 0
staticvoid get_ipsec_sa(struct pktgen_dev *pkt_dev, int flow)
{
#ifdef CONFIG_XFRM
	/* cached transform state for this flow, if already resolved */
	struct xfrm_state *x = pkt_dev->flows[flow].x;
	struct pktgen_net *pn = net_generic(dev_net(pkt_dev->odev), pg_net_id);

	if (!x) {
		if (pkt_dev->spi) {
			/* We need as quick as possible to find the right SA
			 * Searching with minimum criteria to achieve, this.
			 */
			x = xfrm_state_lookup_byspi(pn->net,
						    htonl(pkt_dev->spi),
						    AF_INET);
		} else {
			/* slow path: we don't already have xfrm_state */
			x = xfrm_stateonly_find(pn->net, DUMMY_MARK, 0,
				(xfrm_address_t *)&pkt_dev->cur_daddr,
				(xfrm_address_t *)&pkt_dev->cur_saddr,
				AF_INET,
				pkt_dev->ipsmode,
				pkt_dev->ipsproto, 0);
		}
		if (x) {
			/* account for the SA's header overhead in pkt size */
			pkt_dev->flows[flow].x = x;
			set_pkt_overhead(pkt_dev);
			pkt_dev->pkt_overhead += x->props.header_len;
		}
/*
 * NOTE(review): fragment from the tail of the TX-queue selection helper
 * (set_cur_queue_map() in upstream pktgen) — the function head and the
 * declaration of t are not visible in this chunk, and the braces below do
 * not balance as shown.
 */
	if (pkt_dev->flags & F_QUEUE_MAP_RND) {
		/* random queue within [queue_map_min, queue_map_max] */
		t = get_random_u32_inclusive(pkt_dev->queue_map_min,
					     pkt_dev->queue_map_max);
	} else {
		/* sequential walk of the range, wrapping back at max */
		t = pkt_dev->cur_queue_map + 1;
		if (t > pkt_dev->queue_map_max)
			t = pkt_dev->queue_map_min;
	}
	pkt_dev->cur_queue_map = t;
	}
	/* clamp to the device's current real queue count */
	pkt_dev->cur_queue_map = pkt_dev->cur_queue_map % pkt_dev->odev->real_num_tx_queues;
}
/* Increment/randomize headers according to flags and current values
 * for IP src/dest, UDP src/dst port, MAC-Addr src/dst
 *
 * NOTE(review): "staticvoid" looks like a lost space from extraction —
 * kept verbatim. The function is truncated at the end of this chunk.
 */
staticvoid mod_cur_headers(struct pktgen_dev *pkt_dev)
{
	__u32 imn;
	__u32 imx;
	int flow = 0;

	if (pkt_dev->cflows)
		flow = f_pick(pkt_dev);

	/* Deal with source MAC */
	if (pkt_dev->src_mac_count > 1) {
		__u32 mc;
		__u32 tmp;

		if (pkt_dev->flags & F_MACSRC_RND)
			mc = get_random_u32_below(pkt_dev->src_mac_count);
		else {
			/* sequential walk over the configured MAC range */
			mc = pkt_dev->cur_src_mac_offset++;
			if (pkt_dev->cur_src_mac_offset >=
			    pkt_dev->src_mac_count)
				pkt_dev->cur_src_mac_offset = 0;
		}
	/* NOTE(review): chunk truncated here by extraction. */
/*
 * NOTE(review): the following German website disclaimer is not part of the
 * source code and appears to be extraction residue from the hosting page;
 * preserved verbatim inside this comment so it no longer breaks the file:
 *
 * Die Informationen auf dieser Webseite wurden nach bestem Wissen
 * sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch
 * Richtigkeit, noch Qualität der bereitgestellten Informationen zugesichert.
 * Bemerkung: Die farbliche Syntaxdarstellung und die Messung sind noch
 * experimentell.
 */