if (unlikely(cpu >= nr_cpu_ids ||
!cpumask_test_cpu(cpu, cpu_online_mask))) {
cpu_index = id % cpumask_weight(cpu_online_mask);
cpu = cpumask_first(cpu_online_mask); for (i = 0; i < cpu_index; ++i)
cpu = cpumask_next(cpu, cpu_online_mask);
*stored_cpu = cpu;
} return cpu;
}
/* Deliberately racy: last_cpu is read and updated without any locking, so
 * two concurrent callers can occasionally be handed the same CPU. Making the
 * update atomic or locked measured slower, and a duplicate pick is harmless,
 * so the race is tolerated.
 */
static inline int wg_cpumask_next_online(int *last_cpu)
{
	/* Advance round-robin past the previously used CPU, wrapping to the
	 * first online CPU when we run off the end of the mask.
	 */
	int next = cpumask_next(READ_ONCE(*last_cpu), cpu_online_mask);

	if (next >= nr_cpu_ids)
		next = cpumask_first(cpu_online_mask);
	WRITE_ONCE(*last_cpu, next);
	return next;
}
atomic_set_release(&PACKET_CB(skb)->state, PACKET_STATE_UNCRYPTED); /* We first queue this up for the peer ingestion, but the consumer * will wait for the state to change to CRYPTED or DEAD before.
*/ if (unlikely(!wg_prev_queue_enqueue(peer_queue, skb))) return -ENOSPC;
/* Then we queue it up in the device queue, which consumes the * packet as soon as it can.
*/
cpu = wg_cpumask_next_online(&device_queue->last_cpu); if (unlikely(ptr_ring_produce_bh(&device_queue->ring, skb))) return -EPIPE;
queue_work_on(cpu, wq, &per_cpu_ptr(device_queue->worker, cpu)->work); return 0;
}
staticinlinevoid wg_queue_enqueue_per_peer_tx(struct sk_buff *skb, enum packet_state state)
{ /* We take a reference, because as soon as we call atomic_set, the * peer can be freed from below us.
*/ struct wg_peer *peer = wg_peer_get(PACKET_PEER(skb));
staticinlinevoid wg_queue_enqueue_per_peer_rx(struct sk_buff *skb, enum packet_state state)
{ /* We take a reference, because as soon as we call atomic_set, the * peer can be freed from below us.
*/ struct wg_peer *peer = wg_peer_get(PACKET_PEER(skb));